Nov 24 13:18:06 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 24 13:18:06 crc restorecon[4763]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 13:18:06 crc restorecon[4763]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 13:18:06 crc 
restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 13:18:06 crc restorecon[4763]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 13:18:06 crc restorecon[4763]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 13:18:06 crc 
restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:06 crc restorecon[4763]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 13:18:06 crc restorecon[4763]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:06 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 
13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 13:18:07 crc 
restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 
13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 
13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 13:18:07 crc restorecon[4763]: 
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 13:18:07 crc restorecon[4763]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 13:18:07 crc restorecon[4763]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Nov 24 13:18:08 crc kubenswrapper[5039]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 24 13:18:08 crc kubenswrapper[5039]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Nov 24 13:18:08 crc kubenswrapper[5039]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 24 13:18:08 crc kubenswrapper[5039]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 24 13:18:08 crc kubenswrapper[5039]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 24 13:18:08 crc kubenswrapper[5039]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.014773 5039 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025600 5039 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025634 5039 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025645 5039 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025654 5039 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025663 5039 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025672 5039 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025680 5039 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025690 5039 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025699 5039 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025708 5039 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025719 5039 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025728 5039 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025739 5039 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025750 5039 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025760 5039 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025771 5039 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025781 5039 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025789 5039 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025797 5039 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025805 5039 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025812 5039 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025820 5039 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025829 5039 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025837 5039 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025844 5039 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025852 5039 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025860 5039 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025868 5039 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025876 5039 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025883 5039 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025891 5039 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025925 5039 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025934 5039 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025943 5039 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025964 5039 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025973 5039 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025982 5039 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025990 5039 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.025998 5039 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026006 5039 feature_gate.go:330] unrecognized feature gate: Example
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026014 5039 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026021 5039 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026029 5039 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026037 5039 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026045 5039 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026053 5039 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026062 5039 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026070 5039 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026078 5039 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026092 5039 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026100 5039 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026107 5039 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026115 5039 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026124 5039 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026132 5039 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026143 5039 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
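[annotation] The long run of "unrecognized feature gate" warnings here comes from the full OpenShift cluster gate list being handed to the embedded upstream Kubernetes parser, which only registers kubelet-relevant gates; the list is re-parsed several times during startup, which is why the same names recur below with fresh timestamps. A rough sketch of the mechanism against the upstream k8s.io/component-base/featuregate API, which reports unknown gates as errors (the patched binary logging here evidently downgrades them to warnings); the gate set registered below is illustrative:

package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

func main() {
	fg := featuregate.NewFeatureGate()
	// Register only the gates this binary knows about, as the kubelet does.
	if err := fg.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		"CloudDualStackNodeIPs": {Default: true, PreRelease: featuregate.GA},
	}); err != nil {
		panic(err)
	}
	// OpenShift-only gates such as GatewayAPI were never registered,
	// so the parser cannot resolve them.
	for _, setting := range []string{"CloudDualStackNodeIPs=true", "GatewayAPI=true"} {
		if err := fg.Set(setting); err != nil {
			fmt.Println("W:", err) // e.g. "unrecognized feature gate: GatewayAPI"
		}
	}
	fmt.Println("CloudDualStackNodeIPs enabled:", fg.Enabled("CloudDualStackNodeIPs"))
}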
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026153 5039 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026162 5039 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026171 5039 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026180 5039 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026188 5039 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026196 5039 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026206 5039 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026215 5039 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026224 5039 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026232 5039 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026241 5039 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026249 5039 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026258 5039 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026267 5039 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.026274 5039 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026413 5039 flags.go:64] FLAG: --address="0.0.0.0" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026430 5039 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026443 5039 flags.go:64] FLAG: --anonymous-auth="true" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026455 5039 flags.go:64] FLAG: --application-metrics-count-limit="100" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026467 5039 flags.go:64] FLAG: --authentication-token-webhook="false" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026476 5039 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026488 5039 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026499 5039 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026541 5039 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026551 5039 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026561 5039 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026572 5039 flags.go:64] FLAG: 
--cert-dir="/var/lib/kubelet/pki" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026581 5039 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026590 5039 flags.go:64] FLAG: --cgroup-root="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026599 5039 flags.go:64] FLAG: --cgroups-per-qos="true" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026609 5039 flags.go:64] FLAG: --client-ca-file="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026618 5039 flags.go:64] FLAG: --cloud-config="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026626 5039 flags.go:64] FLAG: --cloud-provider="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026635 5039 flags.go:64] FLAG: --cluster-dns="[]" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026647 5039 flags.go:64] FLAG: --cluster-domain="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026656 5039 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026665 5039 flags.go:64] FLAG: --config-dir="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026674 5039 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026683 5039 flags.go:64] FLAG: --container-log-max-files="5" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026694 5039 flags.go:64] FLAG: --container-log-max-size="10Mi" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026703 5039 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026712 5039 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026722 5039 flags.go:64] FLAG: --containerd-namespace="k8s.io" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026730 5039 flags.go:64] FLAG: --contention-profiling="false" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026740 5039 flags.go:64] FLAG: --cpu-cfs-quota="true" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026749 5039 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026758 5039 flags.go:64] FLAG: --cpu-manager-policy="none" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026767 5039 flags.go:64] FLAG: --cpu-manager-policy-options="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026779 5039 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026788 5039 flags.go:64] FLAG: --enable-controller-attach-detach="true" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026797 5039 flags.go:64] FLAG: --enable-debugging-handlers="true" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026806 5039 flags.go:64] FLAG: --enable-load-reader="false" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026815 5039 flags.go:64] FLAG: --enable-server="true" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026824 5039 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026835 5039 flags.go:64] FLAG: --event-burst="100" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026844 5039 flags.go:64] FLAG: --event-qps="50" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026853 5039 flags.go:64] FLAG: --event-storage-age-limit="default=0" Nov 24 13:18:08 
crc kubenswrapper[5039]: I1124 13:18:08.026862 5039 flags.go:64] FLAG: --event-storage-event-limit="default=0" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026872 5039 flags.go:64] FLAG: --eviction-hard="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026883 5039 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026892 5039 flags.go:64] FLAG: --eviction-minimum-reclaim="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026901 5039 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026913 5039 flags.go:64] FLAG: --eviction-soft="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026923 5039 flags.go:64] FLAG: --eviction-soft-grace-period="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026932 5039 flags.go:64] FLAG: --exit-on-lock-contention="false" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026941 5039 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026950 5039 flags.go:64] FLAG: --experimental-mounter-path="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026959 5039 flags.go:64] FLAG: --fail-cgroupv1="false" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026969 5039 flags.go:64] FLAG: --fail-swap-on="true" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026978 5039 flags.go:64] FLAG: --feature-gates="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.026998 5039 flags.go:64] FLAG: --file-check-frequency="20s" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027007 5039 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027017 5039 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027026 5039 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027036 5039 flags.go:64] FLAG: --healthz-port="10248" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027045 5039 flags.go:64] FLAG: --help="false" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027055 5039 flags.go:64] FLAG: --hostname-override="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027064 5039 flags.go:64] FLAG: --housekeeping-interval="10s" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027073 5039 flags.go:64] FLAG: --http-check-frequency="20s" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027083 5039 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027093 5039 flags.go:64] FLAG: --image-credential-provider-config="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027103 5039 flags.go:64] FLAG: --image-gc-high-threshold="85" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027113 5039 flags.go:64] FLAG: --image-gc-low-threshold="80" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027122 5039 flags.go:64] FLAG: --image-service-endpoint="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027132 5039 flags.go:64] FLAG: --kernel-memcg-notification="false" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027141 5039 flags.go:64] FLAG: --kube-api-burst="100" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027151 5039 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Nov 24 
13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027161 5039 flags.go:64] FLAG: --kube-api-qps="50" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027169 5039 flags.go:64] FLAG: --kube-reserved="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027178 5039 flags.go:64] FLAG: --kube-reserved-cgroup="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027187 5039 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027197 5039 flags.go:64] FLAG: --kubelet-cgroups="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027206 5039 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027216 5039 flags.go:64] FLAG: --lock-file="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027225 5039 flags.go:64] FLAG: --log-cadvisor-usage="false" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027234 5039 flags.go:64] FLAG: --log-flush-frequency="5s" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027243 5039 flags.go:64] FLAG: --log-json-info-buffer-size="0" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027256 5039 flags.go:64] FLAG: --log-json-split-stream="false" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027267 5039 flags.go:64] FLAG: --log-text-info-buffer-size="0" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027276 5039 flags.go:64] FLAG: --log-text-split-stream="false" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027285 5039 flags.go:64] FLAG: --logging-format="text" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027294 5039 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027304 5039 flags.go:64] FLAG: --make-iptables-util-chains="true" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027312 5039 flags.go:64] FLAG: --manifest-url="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027321 5039 flags.go:64] FLAG: --manifest-url-header="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027332 5039 flags.go:64] FLAG: --max-housekeeping-interval="15s" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027341 5039 flags.go:64] FLAG: --max-open-files="1000000" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027352 5039 flags.go:64] FLAG: --max-pods="110" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027361 5039 flags.go:64] FLAG: --maximum-dead-containers="-1" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027370 5039 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027379 5039 flags.go:64] FLAG: --memory-manager-policy="None" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027387 5039 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027398 5039 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027407 5039 flags.go:64] FLAG: --node-ip="192.168.126.11" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027416 5039 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027436 5039 flags.go:64] FLAG: --node-status-max-images="50" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027445 5039 
flags.go:64] FLAG: --node-status-update-frequency="10s" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027454 5039 flags.go:64] FLAG: --oom-score-adj="-999" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027464 5039 flags.go:64] FLAG: --pod-cidr="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027473 5039 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027485 5039 flags.go:64] FLAG: --pod-manifest-path="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027494 5039 flags.go:64] FLAG: --pod-max-pids="-1" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027530 5039 flags.go:64] FLAG: --pods-per-core="0" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027540 5039 flags.go:64] FLAG: --port="10250" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027549 5039 flags.go:64] FLAG: --protect-kernel-defaults="false" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027558 5039 flags.go:64] FLAG: --provider-id="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027567 5039 flags.go:64] FLAG: --qos-reserved="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027576 5039 flags.go:64] FLAG: --read-only-port="10255" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027586 5039 flags.go:64] FLAG: --register-node="true" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027595 5039 flags.go:64] FLAG: --register-schedulable="true" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027604 5039 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027618 5039 flags.go:64] FLAG: --registry-burst="10" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027627 5039 flags.go:64] FLAG: --registry-qps="5" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027637 5039 flags.go:64] FLAG: --reserved-cpus="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027649 5039 flags.go:64] FLAG: --reserved-memory="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027661 5039 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027671 5039 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027680 5039 flags.go:64] FLAG: --rotate-certificates="false" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027690 5039 flags.go:64] FLAG: --rotate-server-certificates="false" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027699 5039 flags.go:64] FLAG: --runonce="false" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027708 5039 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027717 5039 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027726 5039 flags.go:64] FLAG: --seccomp-default="false" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027736 5039 flags.go:64] FLAG: --serialize-image-pulls="true" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027745 5039 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027755 5039 flags.go:64] FLAG: --storage-driver-db="cadvisor" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 
13:18:08.027764 5039 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027773 5039 flags.go:64] FLAG: --storage-driver-password="root" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027783 5039 flags.go:64] FLAG: --storage-driver-secure="false" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027791 5039 flags.go:64] FLAG: --storage-driver-table="stats" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027800 5039 flags.go:64] FLAG: --storage-driver-user="root" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027809 5039 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027819 5039 flags.go:64] FLAG: --sync-frequency="1m0s" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027828 5039 flags.go:64] FLAG: --system-cgroups="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027837 5039 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027853 5039 flags.go:64] FLAG: --system-reserved-cgroup="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027861 5039 flags.go:64] FLAG: --tls-cert-file="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027870 5039 flags.go:64] FLAG: --tls-cipher-suites="[]" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027883 5039 flags.go:64] FLAG: --tls-min-version="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027893 5039 flags.go:64] FLAG: --tls-private-key-file="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027902 5039 flags.go:64] FLAG: --topology-manager-policy="none" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027911 5039 flags.go:64] FLAG: --topology-manager-policy-options="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027920 5039 flags.go:64] FLAG: --topology-manager-scope="container" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027929 5039 flags.go:64] FLAG: --v="2" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027941 5039 flags.go:64] FLAG: --version="false" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027952 5039 flags.go:64] FLAG: --vmodule="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027962 5039 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.027972 5039 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028172 5039 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028183 5039 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028194 5039 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028202 5039 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028211 5039 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028220 5039 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028228 5039 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 24 13:18:08 crc 
kubenswrapper[5039]: W1124 13:18:08.028237 5039 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028246 5039 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028255 5039 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028266 5039 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028277 5039 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028289 5039 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028299 5039 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028310 5039 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028319 5039 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028328 5039 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028337 5039 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028346 5039 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028353 5039 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028362 5039 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028369 5039 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028378 5039 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028386 5039 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028394 5039 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028402 5039 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028410 5039 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028418 5039 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028426 5039 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028434 5039 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028444 5039 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028453 5039 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028462 5039 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028471 5039 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028479 5039 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028487 5039 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028494 5039 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028528 5039 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028538 5039 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028554 5039 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028562 5039 feature_gate.go:330] unrecognized feature gate: Example Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028570 5039 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028577 5039 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028585 5039 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028593 5039 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028601 5039 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028608 5039 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028616 5039 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028624 5039 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028635 5039 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028643 5039 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028651 5039 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028658 5039 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028667 5039 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028675 5039 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028682 5039 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028690 5039 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes 
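[annotation] Each "flags.go:64] FLAG: --..." line in the dump above records one parsed command-line flag with its effective value, so the kubelet's full invocation can be reconstructed from the log alone. A small stand-alone analogue using only the Go standard library; the two flags shown are illustrative, with defaults copied from the dump:

package main

import (
	"flag"
	"fmt"
)

func main() {
	// A couple of representative flags with the defaults seen in the log.
	flag.String("node-ip", "", "IP address of the node")
	flag.Int("max-pods", 110, "maximum number of pods")
	flag.Parse()

	// After parsing, print every registered flag as FLAG: --name="value",
	// mirroring the kubelet's startup dump.
	flag.VisitAll(func(f *flag.Flag) {
		fmt.Printf("FLAG: --%s=%q\n", f.Name, f.Value.String())
	})
}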
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028698 5039 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028705 5039 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028713 5039 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028721 5039 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028729 5039 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028736 5039 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028744 5039 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028752 5039 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028759 5039 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028767 5039 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028774 5039 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028782 5039 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028790 5039 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.028797 5039 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.028823 5039 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.041136 5039 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.041185 5039 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041341 5039 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041357 5039 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041367 5039 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041379 5039 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041389 5039 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041399 5039 feature_gate.go:330] unrecognized feature gate: 
AlibabaPlatform Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041410 5039 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041421 5039 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041431 5039 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041441 5039 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041451 5039 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041460 5039 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041471 5039 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041481 5039 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041490 5039 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041500 5039 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041568 5039 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041577 5039 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041587 5039 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041596 5039 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041606 5039 feature_gate.go:330] unrecognized feature gate: Example Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041615 5039 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041625 5039 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041634 5039 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041644 5039 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041654 5039 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041663 5039 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041672 5039 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041683 5039 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041697 5039 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041707 5039 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041717 5039 
feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041726 5039 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041736 5039 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041746 5039 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041760 5039 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041775 5039 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041788 5039 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041802 5039 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041813 5039 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041825 5039 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041836 5039 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041846 5039 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041855 5039 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041865 5039 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041878 5039 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041892 5039 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041905 5039 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041958 5039 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041969 5039 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041980 5039 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.041990 5039 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042000 5039 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042012 5039 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042022 5039 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042032 5039 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042043 5039 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042055 5039 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042065 5039 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042075 5039 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042086 5039 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042097 5039 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042107 5039 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042118 5039 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042128 5039 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042139 5039 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042149 5039 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042159 5039 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042169 5039 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042179 5039 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042189 5039 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.042206 5039 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true 
MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042552 5039 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042571 5039 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042582 5039 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042593 5039 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042603 5039 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042613 5039 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042624 5039 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042635 5039 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042645 5039 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042655 5039 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042665 5039 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042674 5039 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042683 5039 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042697 5039 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042711 5039 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042721 5039 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042732 5039 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042741 5039 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042751 5039 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042761 5039 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042771 5039 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042780 5039 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042790 5039 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042800 5039 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042810 5039 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042820 5039 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042829 5039 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042839 5039 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042848 5039 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042861 5039 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042871 5039 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042881 5039 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042891 5039 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042900 5039 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042910 5039 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042920 5039 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042929 5039 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042938 5039 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042949 5039 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042959 5039 feature_gate.go:330] 
unrecognized feature gate: AdditionalRoutingCapabilities Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042969 5039 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042979 5039 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.042989 5039 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043002 5039 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043016 5039 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043029 5039 feature_gate.go:330] unrecognized feature gate: Example Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043041 5039 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043051 5039 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043062 5039 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043072 5039 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043081 5039 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043091 5039 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043101 5039 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043112 5039 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043125 5039 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043136 5039 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043146 5039 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043156 5039 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043165 5039 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043175 5039 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043188 5039 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043198 5039 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043207 5039 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043217 5039 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043226 5039 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043257 5039 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043268 5039 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043278 5039 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043288 5039 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043298 5039 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.043307 5039 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.043325 5039 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.043665 5039 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.050841 5039 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.051021 5039 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
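[annotation] With the client certificate pair loaded, the certificate manager (lines just below) schedules the next rotation: it picks a jittered deadline ahead of the certificate's NotAfter and sleeps for deadline minus now. The "Waiting 886h57m47.780013641s" figure below can be cross-checked from the values the log itself prints; a small verification sketch using only those values:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Rotation deadline and wait, copied from the certificate_manager lines.
	deadline, err := time.Parse(time.RFC3339Nano, "2025-12-31T12:15:55.833502610Z")
	if err != nil {
		panic(err)
	}
	wait, err := time.ParseDuration("886h57m47.780013641s")
	if err != nil {
		panic(err)
	}
	// deadline - wait recovers the instant at which the wait was computed,
	// i.e. kubelet startup time.
	fmt.Println(deadline.Add(-wait).UTC())
}

Running it prints 2025-11-24 13:18:08.053488969 +0000 UTC, matching the startup timestamps on the surrounding entries.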
Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.053052 5039 server.go:997] "Starting client certificate rotation" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.053111 5039 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.053376 5039 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-31 12:15:55.83350261 +0000 UTC Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.053495 5039 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 886h57m47.780013641s for next certificate rotation Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.081411 5039 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.085834 5039 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.113258 5039 log.go:25] "Validated CRI v1 runtime API" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.165755 5039 log.go:25] "Validated CRI v1 image API" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.168549 5039 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.175400 5039 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-24-13-13-01-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.175444 5039 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:41 fsType:tmpfs blockSize:0}] Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.197626 5039 manager.go:217] Machine: {Timestamp:2025-11-24 13:18:08.193470893 +0000 UTC m=+0.632595483 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654124544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:afac5e87-a763-4b8d-96ee-10f975a13d9c BootID:4dbca686-4ed9-4a84-88e1-60f728139059 Filesystems:[{Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 
Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:41 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:b1:f4:32 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:b1:f4:32 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:20:00:1f Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:80:19:df Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:d6:aa:10 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:69:d1:98 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:97:b3:68 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:1a:23:37:8e:c0:b1 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:52:11:06:17:01:ac Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654124544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] 
UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.198225 5039 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.198668 5039 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.200547 5039 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.200877 5039 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.200942 5039 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.201303 5039 topology_manager.go:138] "Creating topology manager with none policy" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.201323 5039 container_manager_linux.go:303] "Creating device plugin manager" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.201875 5039 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.201925 5039 server.go:66] "Creating device plugin registration server" 
version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.202777 5039 state_mem.go:36] "Initialized new in-memory state store" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.202927 5039 server.go:1245] "Using root directory" path="/var/lib/kubelet" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.206559 5039 kubelet.go:418] "Attempting to sync node with API server" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.206593 5039 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.206636 5039 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.206662 5039 kubelet.go:324] "Adding apiserver pod source" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.206682 5039 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.211705 5039 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.212949 5039 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.216085 5039 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.216092 5039 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Nov 24 13:18:08 crc kubenswrapper[5039]: E1124 13:18:08.216366 5039 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Nov 24 13:18:08 crc kubenswrapper[5039]: E1124 13:18:08.216444 5039 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.216854 5039 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.218659 5039 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.218709 5039 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.218726 5039 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.218742 5039 plugins.go:603] "Loaded volume plugin" 
pluginName="kubernetes.io/host-path" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.218764 5039 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.218781 5039 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.218796 5039 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.218819 5039 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.218835 5039 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.218851 5039 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.218870 5039 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.218888 5039 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.219815 5039 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.220753 5039 server.go:1280] "Started kubelet" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.221910 5039 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.222468 5039 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.222690 5039 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Nov 24 13:18:08 crc systemd[1]: Started Kubernetes Kubelet. 
Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.225823 5039 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.225907 5039 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.225964 5039 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.226036 5039 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 22:27:19.745713372 +0000 UTC Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.226138 5039 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 633h9m11.51958165s for next certificate rotation Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.226315 5039 volume_manager.go:287] "The desired_state_of_world populator starts" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.226340 5039 volume_manager.go:289] "Starting Kubelet Volume Manager" Nov 24 13:18:08 crc kubenswrapper[5039]: E1124 13:18:08.226385 5039 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.229249 5039 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Nov 24 13:18:08 crc kubenswrapper[5039]: E1124 13:18:08.236659 5039 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="200ms" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.237306 5039 server.go:460] "Adding debug handlers to kubelet server" Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.238524 5039 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Nov 24 13:18:08 crc kubenswrapper[5039]: E1124 13:18:08.238646 5039 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.239045 5039 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.239163 5039 factory.go:55] Registering systemd factory Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.239225 5039 factory.go:221] Registration of the systemd container factory successfully Nov 24 13:18:08 crc kubenswrapper[5039]: E1124 13:18:08.237845 5039 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.175:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187af3d234a25ed0 default 0 0001-01-01 
00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-24 13:18:08.22070856 +0000 UTC m=+0.659833090,LastTimestamp:2025-11-24 13:18:08.22070856 +0000 UTC m=+0.659833090,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.241270 5039 factory.go:153] Registering CRI-O factory Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.241302 5039 factory.go:221] Registration of the crio container factory successfully Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.241332 5039 factory.go:103] Registering Raw factory Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.241353 5039 manager.go:1196] Started watching for new ooms in manager Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.242072 5039 manager.go:319] Starting recovery of all containers Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.268350 5039 manager.go:324] Recovery completed Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.268375 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.268812 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.268829 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.268845 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.268859 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.268873 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.268887 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 
13:18:08.268902 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.268918 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.268930 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.268942 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.268955 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.268966 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.268980 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.268995 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269009 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269025 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269036 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269084 5039 
reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269099 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269109 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269121 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269134 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269146 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269159 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269172 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269188 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269203 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269216 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269228 5039 reconstruct.go:130] "Volume is marked as uncertain and added 
into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269241 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269254 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269268 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269281 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269296 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269309 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269321 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269333 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269344 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269356 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269368 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" 
pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269381 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.269392 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.277809 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.279671 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.279724 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.279747 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.280535 5039 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.280557 5039 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.280579 5039 state_mem.go:36] "Initialized new in-memory state store" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289390 5039 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289429 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289448 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289462 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289475 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" 
volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289489 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289520 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289533 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289545 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289558 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289579 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289599 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289614 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289642 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289655 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289667 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" 
volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289679 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289690 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289700 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289713 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289726 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289737 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289749 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289760 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289772 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289784 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289799 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" 
volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289811 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289823 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289835 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289847 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289864 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289876 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289887 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289910 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289922 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289934 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289944 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289958 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289969 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289983 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.289993 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290005 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290017 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290029 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290040 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290051 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290064 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290077 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" 
volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290089 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290101 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290114 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290127 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290139 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290177 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290189 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290201 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290213 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290225 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290239 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290252 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290262 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290280 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290293 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290307 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290320 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290333 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290346 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290358 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290370 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290381 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" 
volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290393 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290405 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290415 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290428 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290439 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290452 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290464 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290477 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290488 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290588 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290603 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" 
volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290616 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290628 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290640 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290653 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290666 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290680 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290696 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290712 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290726 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290739 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290753 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" 
volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290765 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290777 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290787 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290798 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290808 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290819 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290831 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290845 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290857 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290869 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290881 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" 
seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290893 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290904 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290915 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290927 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290939 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290950 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290962 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290973 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290986 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.290996 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291007 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 24 13:18:08 crc 
kubenswrapper[5039]: I1124 13:18:08.291018 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291030 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291041 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291055 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291067 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291079 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291092 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291104 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291116 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291129 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291141 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291154 5039 
reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291165 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291178 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291190 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291201 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291212 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291224 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291236 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291249 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291262 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291273 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291286 5039 reconstruct.go:130] "Volume is 
marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291297 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291309 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291321 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291333 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291345 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291357 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291370 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291382 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291394 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291407 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291419 5039 reconstruct.go:130] "Volume is 
marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291432 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291444 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291457 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291472 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291484 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291498 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291529 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291542 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291554 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291564 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291576 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291589 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291600 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291612 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291629 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291641 5039 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291650 5039 reconstruct.go:97] "Volume reconstruction finished" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.291657 5039 reconciler.go:26] "Reconciler: start to sync state" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.303106 5039 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.305276 5039 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.305357 5039 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.305430 5039 kubelet.go:2335] "Starting kubelet main sync loop" Nov 24 13:18:08 crc kubenswrapper[5039]: E1124 13:18:08.305546 5039 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 24 13:18:08 crc kubenswrapper[5039]: W1124 13:18:08.306144 5039 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Nov 24 13:18:08 crc kubenswrapper[5039]: E1124 13:18:08.306249 5039 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Nov 24 13:18:08 crc kubenswrapper[5039]: E1124 13:18:08.327072 5039 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 24 13:18:08 crc kubenswrapper[5039]: E1124 13:18:08.406416 5039 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Nov 24 13:18:08 crc kubenswrapper[5039]: E1124 13:18:08.427752 5039 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 24 13:18:08 crc kubenswrapper[5039]: E1124 13:18:08.438622 5039 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="400ms" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.526188 5039 policy_none.go:49] "None policy: Start" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.527327 5039 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.527360 5039 state_mem.go:35] "Initializing new in-memory state store" Nov 24 13:18:08 crc kubenswrapper[5039]: E1124 13:18:08.527864 5039 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 24 13:18:08 crc kubenswrapper[5039]: E1124 13:18:08.607469 5039 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Nov 24 13:18:08 crc kubenswrapper[5039]: E1124 13:18:08.628494 5039 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.663788 5039 manager.go:334] "Starting Device Plugin manager" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.663863 5039 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.663883 5039 server.go:79] "Starting device plugin registration server" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.664799 5039 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 24 
13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.664821 5039 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.665015 5039 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.665205 5039 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.665226 5039 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 24 13:18:08 crc kubenswrapper[5039]: E1124 13:18:08.672854 5039 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.764984 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.766708 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.766773 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.766793 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.766826 5039 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 13:18:08 crc kubenswrapper[5039]: E1124 13:18:08.767574 5039 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.175:6443: connect: connection refused" node="crc" Nov 24 13:18:08 crc kubenswrapper[5039]: E1124 13:18:08.839299 5039 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="800ms" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.968295 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.970327 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.970384 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.970402 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:08 crc kubenswrapper[5039]: I1124 13:18:08.970437 5039 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 13:18:08 crc kubenswrapper[5039]: E1124 13:18:08.971153 5039 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.175:6443: connect: connection refused" node="crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.008103 5039 kubelet.go:2421] "SyncLoop ADD" source="file" 
pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.008228 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.012575 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.012635 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.012646 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.012809 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.013341 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.013396 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.014628 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.014692 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.014721 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.014905 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.014950 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.014964 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.014982 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.015312 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.015347 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.016245 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.016278 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.016291 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.016412 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.016634 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.016711 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.016870 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.016890 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.016899 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.017250 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.017274 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.017282 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.017358 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.017482 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.017517 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.018365 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.018383 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.018391 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.018561 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.018611 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.018931 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.018947 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.018954 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.019096 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.019107 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.019113 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.019708 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.019887 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.020027 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.101234 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.101272 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.101290 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.101305 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.101322 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" 
(UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.101342 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.101361 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.101469 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.101527 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.101594 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.101720 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.101762 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.101784 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.101897 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod 
\"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.101938 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.203464 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.203693 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.203713 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.203777 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.203824 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.203853 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.203865 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.203986 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204066 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: 
\"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204072 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204101 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204149 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204187 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204237 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204235 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204290 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204303 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204331 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204348 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204382 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204369 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204429 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204454 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204469 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204537 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204547 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204600 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204653 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204701 5039 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.204757 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.223111 5039 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.359102 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.370227 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.371989 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.373250 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.373316 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.373336 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.373381 5039 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 13:18:09 crc kubenswrapper[5039]: E1124 13:18:09.373949 5039 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.175:6443: connect: connection refused" node="crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.394500 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: W1124 13:18:09.418003 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-b813612146828b7c301ff3ea1acf666ea7ae2fe02fb452bf51afbe9fafeaf1af WatchSource:0}: Error finding container b813612146828b7c301ff3ea1acf666ea7ae2fe02fb452bf51afbe9fafeaf1af: Status 404 returned error can't find the container with id b813612146828b7c301ff3ea1acf666ea7ae2fe02fb452bf51afbe9fafeaf1af Nov 24 13:18:09 crc kubenswrapper[5039]: W1124 13:18:09.419456 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-99eb090f98d934c73777bac002fbf75b321c18f9ef553b35be52b1a6a74f110d WatchSource:0}: Error finding container 99eb090f98d934c73777bac002fbf75b321c18f9ef553b35be52b1a6a74f110d: Status 404 returned error can't find the container with id 99eb090f98d934c73777bac002fbf75b321c18f9ef553b35be52b1a6a74f110d Nov 24 13:18:09 crc kubenswrapper[5039]: W1124 13:18:09.424827 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-8d5d27ab433e79b44073ca547044df2c8e4736a3de0c8e88f0fb805b865c0bcd WatchSource:0}: Error finding container 8d5d27ab433e79b44073ca547044df2c8e4736a3de0c8e88f0fb805b865c0bcd: Status 404 returned error can't find the container with id 8d5d27ab433e79b44073ca547044df2c8e4736a3de0c8e88f0fb805b865c0bcd Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.425950 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: I1124 13:18:09.433387 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 13:18:09 crc kubenswrapper[5039]: W1124 13:18:09.459771 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-d7af542f2341ff95238633dbab7f3fb4b196f202478d5cdf91ee4e0973bfb783 WatchSource:0}: Error finding container d7af542f2341ff95238633dbab7f3fb4b196f202478d5cdf91ee4e0973bfb783: Status 404 returned error can't find the container with id d7af542f2341ff95238633dbab7f3fb4b196f202478d5cdf91ee4e0973bfb783 Nov 24 13:18:09 crc kubenswrapper[5039]: W1124 13:18:09.462309 5039 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Nov 24 13:18:09 crc kubenswrapper[5039]: E1124 13:18:09.462435 5039 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Nov 24 13:18:09 crc kubenswrapper[5039]: W1124 13:18:09.465419 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-2561312aedebcc18cb63f76f0aa9b908b3bf6dafba6394b59fc79e40d017a780 WatchSource:0}: Error finding container 2561312aedebcc18cb63f76f0aa9b908b3bf6dafba6394b59fc79e40d017a780: Status 404 returned error can't find the container with id 2561312aedebcc18cb63f76f0aa9b908b3bf6dafba6394b59fc79e40d017a780 Nov 24 13:18:09 crc kubenswrapper[5039]: W1124 13:18:09.473065 5039 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Nov 24 13:18:09 crc kubenswrapper[5039]: E1124 13:18:09.473141 5039 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Nov 24 13:18:09 crc kubenswrapper[5039]: E1124 13:18:09.640376 5039 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="1.6s" Nov 24 13:18:09 crc kubenswrapper[5039]: W1124 13:18:09.703200 5039 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Nov 24 13:18:09 crc kubenswrapper[5039]: E1124 13:18:09.703288 5039 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get 
\"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Nov 24 13:18:09 crc kubenswrapper[5039]: W1124 13:18:09.760095 5039 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Nov 24 13:18:09 crc kubenswrapper[5039]: E1124 13:18:09.760175 5039 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Nov 24 13:18:10 crc kubenswrapper[5039]: I1124 13:18:10.175102 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:10 crc kubenswrapper[5039]: I1124 13:18:10.177012 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:10 crc kubenswrapper[5039]: I1124 13:18:10.177063 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:10 crc kubenswrapper[5039]: I1124 13:18:10.177080 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:10 crc kubenswrapper[5039]: I1124 13:18:10.177114 5039 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 13:18:10 crc kubenswrapper[5039]: E1124 13:18:10.177615 5039 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.175:6443: connect: connection refused" node="crc" Nov 24 13:18:10 crc kubenswrapper[5039]: I1124 13:18:10.222382 5039 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Nov 24 13:18:10 crc kubenswrapper[5039]: I1124 13:18:10.312715 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d7af542f2341ff95238633dbab7f3fb4b196f202478d5cdf91ee4e0973bfb783"} Nov 24 13:18:10 crc kubenswrapper[5039]: I1124 13:18:10.314051 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"8d5d27ab433e79b44073ca547044df2c8e4736a3de0c8e88f0fb805b865c0bcd"} Nov 24 13:18:10 crc kubenswrapper[5039]: I1124 13:18:10.315225 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b813612146828b7c301ff3ea1acf666ea7ae2fe02fb452bf51afbe9fafeaf1af"} Nov 24 13:18:10 crc kubenswrapper[5039]: I1124 13:18:10.316401 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" 
event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"99eb090f98d934c73777bac002fbf75b321c18f9ef553b35be52b1a6a74f110d"} Nov 24 13:18:10 crc kubenswrapper[5039]: I1124 13:18:10.317477 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"2561312aedebcc18cb63f76f0aa9b908b3bf6dafba6394b59fc79e40d017a780"} Nov 24 13:18:11 crc kubenswrapper[5039]: W1124 13:18:11.150794 5039 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Nov 24 13:18:11 crc kubenswrapper[5039]: E1124 13:18:11.150886 5039 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.223585 5039 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Nov 24 13:18:11 crc kubenswrapper[5039]: E1124 13:18:11.241888 5039 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.175:6443: connect: connection refused" interval="3.2s" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.323570 5039 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d494836fe5bc8e8ae" exitCode=0 Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.323667 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d494836fe5bc8e8ae"} Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.323687 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.324962 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.324999 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.325011 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.326027 5039 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb" exitCode=0 Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.326095 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb"} Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.326220 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.327639 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.327665 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.327678 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.329238 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.329980 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819"} Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.330026 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890"} Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.330039 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24"} Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.330225 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.330248 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.330260 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.332175 5039 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="27b93980b3344c4e620a144618543370cda666b5e9d06144726581ff5ca6a4a8" exitCode=0 Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.332278 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.332279 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"27b93980b3344c4e620a144618543370cda666b5e9d06144726581ff5ca6a4a8"} Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.333150 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.333186 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.333199 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.334183 5039 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="c3e22af4a2ec7fcbde66ae997e42b5d809457981a7e33e0f6505ab9c3c1f35f6" exitCode=0 Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.334218 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"c3e22af4a2ec7fcbde66ae997e42b5d809457981a7e33e0f6505ab9c3c1f35f6"} Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.334255 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.335361 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.335399 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.335412 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.778416 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.780923 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.780988 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.781000 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:11 crc kubenswrapper[5039]: I1124 13:18:11.781030 5039 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 13:18:11 crc kubenswrapper[5039]: E1124 13:18:11.781558 5039 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.175:6443: connect: connection refused" node="crc" Nov 24 13:18:11 crc kubenswrapper[5039]: W1124 13:18:11.986318 5039 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Nov 24 13:18:11 crc kubenswrapper[5039]: E1124 13:18:11.986489 5039 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.223162 5039 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 
38.102.83.175:6443: connect: connection refused Nov 24 13:18:12 crc kubenswrapper[5039]: W1124 13:18:12.282624 5039 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Nov 24 13:18:12 crc kubenswrapper[5039]: E1124 13:18:12.282692 5039 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.338801 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c"} Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.338923 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.339822 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.339847 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.339856 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.341570 5039 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="899b4100570354a645e5e3b03329be096e577096fcf61094177426d628dca7f9" exitCode=0 Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.341610 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"899b4100570354a645e5e3b03329be096e577096fcf61094177426d628dca7f9"} Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.341681 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.342207 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.342233 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.342244 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.343773 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"9a8081ca1b2aa80ea0bde26f9fb01960953127daffbbbdbe49afdf7af9a337a2"} Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.343828 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 
13:18:12.344449 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.344468 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.344478 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.350248 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"5616706b755bdff47ccadc09ae036e231c76e5953f8c5af9ea9cf8f8e449c59c"} Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.350275 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"62e7a017d9a3276e864342729bdc35453bb95e9e469760efb6ea283ffb618228"} Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.350285 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"e6328891f72adff70742b2aa64842672875abde57dfd275453ddbc585af80f9f"} Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.350344 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.351055 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.351078 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.351086 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.353342 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e"} Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.353364 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386"} Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.353374 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4"} Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.353383 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5"} Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.353391 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9"} Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.353454 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.354249 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.354268 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:12 crc kubenswrapper[5039]: I1124 13:18:12.354276 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:12 crc kubenswrapper[5039]: W1124 13:18:12.361572 5039 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.175:6443: connect: connection refused Nov 24 13:18:12 crc kubenswrapper[5039]: E1124 13:18:12.361667 5039 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.175:6443: connect: connection refused" logger="UnhandledError" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.360316 5039 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="16a62e851300e0bbf88cfa51318b6acb1be0cf18d704475f0024d9285194baa3" exitCode=0 Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.360405 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"16a62e851300e0bbf88cfa51318b6acb1be0cf18d704475f0024d9285194baa3"} Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.360542 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.360583 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.360638 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.360578 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.360770 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.360722 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.360583 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.363387 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:13 crc 
kubenswrapper[5039]: I1124 13:18:13.363447 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.363465 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.363486 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.363495 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.363544 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.363533 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.363716 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.363761 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.365040 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.365121 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.365155 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.366916 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.366962 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.366989 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.660620 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 13:18:13 crc kubenswrapper[5039]: I1124 13:18:13.707961 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.367998 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"03329dd844f905c588d87460b8d1f9f795e5250784a4cb476f2ba7763ff54376"} Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.368064 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"29e548973076b5d6018dbbbac07abcf3f933e5136bcfff09a7e010f5e84c3186"} Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.368022 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller 
attach/detach" Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.368091 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.368021 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.368242 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.370069 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.370103 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.370117 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.370124 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.370163 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.370187 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.370134 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.370247 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.370133 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.411072 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.425479 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.982382 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.983673 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.983724 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.983743 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:14 crc kubenswrapper[5039]: I1124 13:18:14.983778 5039 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 13:18:15 crc kubenswrapper[5039]: I1124 13:18:15.378692 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" 
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"37b1fa01ffac84610e0bb6bce6aed083ba9b18d7e130233b0c0a07fd59793e00"} Nov 24 13:18:15 crc kubenswrapper[5039]: I1124 13:18:15.378759 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"bf817ae7683a4d2a4317f8570787fc9a53a29ce1a217a390ceae680e82692680"} Nov 24 13:18:15 crc kubenswrapper[5039]: I1124 13:18:15.378794 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c3e6be27807764e40e81f1458a9054cbdf2f972cc7806ecd4527129b1364dea0"} Nov 24 13:18:15 crc kubenswrapper[5039]: I1124 13:18:15.378806 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:15 crc kubenswrapper[5039]: I1124 13:18:15.378900 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:15 crc kubenswrapper[5039]: I1124 13:18:15.378972 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:15 crc kubenswrapper[5039]: I1124 13:18:15.380783 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:15 crc kubenswrapper[5039]: I1124 13:18:15.380811 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:15 crc kubenswrapper[5039]: I1124 13:18:15.380851 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:15 crc kubenswrapper[5039]: I1124 13:18:15.380874 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:15 crc kubenswrapper[5039]: I1124 13:18:15.380825 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:15 crc kubenswrapper[5039]: I1124 13:18:15.380951 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:15 crc kubenswrapper[5039]: I1124 13:18:15.382071 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:15 crc kubenswrapper[5039]: I1124 13:18:15.382133 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:15 crc kubenswrapper[5039]: I1124 13:18:15.382158 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:16 crc kubenswrapper[5039]: I1124 13:18:16.382179 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:16 crc kubenswrapper[5039]: I1124 13:18:16.382328 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:16 crc kubenswrapper[5039]: I1124 13:18:16.384053 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:16 crc kubenswrapper[5039]: I1124 13:18:16.384123 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:16 crc kubenswrapper[5039]: I1124 13:18:16.384119 5039 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:16 crc kubenswrapper[5039]: I1124 13:18:16.384146 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:16 crc kubenswrapper[5039]: I1124 13:18:16.384169 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:16 crc kubenswrapper[5039]: I1124 13:18:16.384184 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:16 crc kubenswrapper[5039]: I1124 13:18:16.401647 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 24 13:18:16 crc kubenswrapper[5039]: I1124 13:18:16.796707 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 13:18:16 crc kubenswrapper[5039]: I1124 13:18:16.797036 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:16 crc kubenswrapper[5039]: I1124 13:18:16.798734 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:16 crc kubenswrapper[5039]: I1124 13:18:16.798779 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:16 crc kubenswrapper[5039]: I1124 13:18:16.798791 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:16 crc kubenswrapper[5039]: I1124 13:18:16.906820 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 13:18:17 crc kubenswrapper[5039]: I1124 13:18:17.180771 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 24 13:18:17 crc kubenswrapper[5039]: I1124 13:18:17.384850 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:17 crc kubenswrapper[5039]: I1124 13:18:17.384983 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:17 crc kubenswrapper[5039]: I1124 13:18:17.385664 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:17 crc kubenswrapper[5039]: I1124 13:18:17.385695 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:17 crc kubenswrapper[5039]: I1124 13:18:17.385704 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:17 crc kubenswrapper[5039]: I1124 13:18:17.386477 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:17 crc kubenswrapper[5039]: I1124 13:18:17.386558 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:17 crc kubenswrapper[5039]: I1124 13:18:17.386578 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:18 crc kubenswrapper[5039]: I1124 13:18:18.388181 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:18 crc 
kubenswrapper[5039]: I1124 13:18:18.389905 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:18 crc kubenswrapper[5039]: I1124 13:18:18.389978 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:18 crc kubenswrapper[5039]: I1124 13:18:18.390008 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:18 crc kubenswrapper[5039]: E1124 13:18:18.672981 5039 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 24 13:18:19 crc kubenswrapper[5039]: I1124 13:18:19.907147 5039 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 24 13:18:19 crc kubenswrapper[5039]: I1124 13:18:19.907228 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 24 13:18:20 crc kubenswrapper[5039]: I1124 13:18:20.970034 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 24 13:18:20 crc kubenswrapper[5039]: I1124 13:18:20.970227 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 24 13:18:20 crc kubenswrapper[5039]: I1124 13:18:20.972029 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:20 crc kubenswrapper[5039]: I1124 13:18:20.972083 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:20 crc kubenswrapper[5039]: I1124 13:18:20.972107 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:23 crc kubenswrapper[5039]: I1124 13:18:23.224276 5039 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout
Nov 24 13:18:23 crc kubenswrapper[5039]: I1124 13:18:23.651827 5039 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 24 13:18:23 crc kubenswrapper[5039]: I1124 13:18:23.651906 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 24 13:18:23 crc kubenswrapper[5039]: I1124 13:18:23.657149 5039 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 24 13:18:23 crc kubenswrapper[5039]: I1124 13:18:23.657240 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 24 13:18:26 crc kubenswrapper[5039]: I1124 13:18:26.805115 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 24 13:18:26 crc kubenswrapper[5039]: I1124 13:18:26.805361 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 24 13:18:26 crc kubenswrapper[5039]: I1124 13:18:26.807114 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:26 crc kubenswrapper[5039]: I1124 13:18:26.807205 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:26 crc kubenswrapper[5039]: I1124 13:18:26.807224 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:26 crc kubenswrapper[5039]: I1124 13:18:26.812147 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 24 13:18:27 crc kubenswrapper[5039]: I1124 13:18:27.218416 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Nov 24 13:18:27 crc kubenswrapper[5039]: I1124 13:18:27.218704 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 24 13:18:27 crc kubenswrapper[5039]: I1124 13:18:27.220422 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:27 crc kubenswrapper[5039]: I1124 13:18:27.220545 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:27 crc kubenswrapper[5039]: I1124 13:18:27.220579 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:27 crc kubenswrapper[5039]: I1124 13:18:27.235987 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Nov 24 13:18:27 crc kubenswrapper[5039]: I1124 13:18:27.411830 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 24 13:18:27 crc kubenswrapper[5039]: I1124 13:18:27.412093 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 24 13:18:27 crc kubenswrapper[5039]: I1124 13:18:27.413130 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:27 crc kubenswrapper[5039]: I1124 13:18:27.413193 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:27 crc kubenswrapper[5039]: I1124 13:18:27.413251 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:27 crc kubenswrapper[5039]: I1124 13:18:27.414058 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:27 crc kubenswrapper[5039]: I1124 13:18:27.414109 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:27 crc kubenswrapper[5039]: I1124 13:18:27.414126 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:28 crc kubenswrapper[5039]: E1124 13:18:28.645255 5039 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s"
Nov 24 13:18:28 crc kubenswrapper[5039]: I1124 13:18:28.649643 5039 trace.go:236] Trace[1684650536]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (24-Nov-2025 13:18:17.530) (total time: 11119ms):
Nov 24 13:18:28 crc kubenswrapper[5039]: Trace[1684650536]: ---"Objects listed" error: 11119ms (13:18:28.649)
Nov 24 13:18:28 crc kubenswrapper[5039]: Trace[1684650536]: [11.119107075s] [11.119107075s] END
Nov 24 13:18:28 crc kubenswrapper[5039]: I1124 13:18:28.649692 5039 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 24 13:18:28 crc kubenswrapper[5039]: I1124 13:18:28.649895 5039 trace.go:236] Trace[812774605]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (24-Nov-2025 13:18:17.042) (total time: 11607ms):
Nov 24 13:18:28 crc kubenswrapper[5039]: Trace[812774605]: ---"Objects listed" error: 11607ms (13:18:28.649)
Nov 24 13:18:28 crc kubenswrapper[5039]: Trace[812774605]: [11.60718248s] [11.60718248s] END
Nov 24 13:18:28 crc kubenswrapper[5039]: I1124 13:18:28.649939 5039 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 24 13:18:28 crc kubenswrapper[5039]: I1124 13:18:28.650549 5039 trace.go:236] Trace[1160538680]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (24-Nov-2025 13:18:14.756) (total time: 13894ms):
Nov 24 13:18:28 crc kubenswrapper[5039]: Trace[1160538680]: ---"Objects listed" error: 13894ms (13:18:28.650)
Nov 24 13:18:28 crc kubenswrapper[5039]: Trace[1160538680]: [13.894389942s] [13.894389942s] END
Nov 24 13:18:28 crc kubenswrapper[5039]: I1124 13:18:28.651172 5039 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 24 13:18:28 crc kubenswrapper[5039]: I1124 13:18:28.653989 5039 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Nov 24 13:18:28 crc kubenswrapper[5039]: E1124 13:18:28.654095 5039 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Nov 24 13:18:28 crc kubenswrapper[5039]: I1124 13:18:28.657929 5039 trace.go:236] Trace[1056983270]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (24-Nov-2025 13:18:17.766) (total time: 10891ms):
Nov 24 13:18:28 crc kubenswrapper[5039]: Trace[1056983270]: ---"Objects listed" error: 10891ms (13:18:28.657)
Nov 24 13:18:28 crc kubenswrapper[5039]: Trace[1056983270]: [10.891510626s] [10.891510626s] END
Nov 24 13:18:28 crc kubenswrapper[5039]: I1124 13:18:28.657963 5039 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 24 13:18:28 crc kubenswrapper[5039]: I1124 13:18:28.694652 5039 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:56522->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Nov 24 13:18:28 crc kubenswrapper[5039]: I1124 13:18:28.694763 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:56522->192.168.126.11:17697: read: connection reset by peer"
Nov 24 13:18:28 crc kubenswrapper[5039]: I1124 13:18:28.696003 5039 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:36276->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Nov 24 13:18:28 crc kubenswrapper[5039]: I1124 13:18:28.696081 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:36276->192.168.126.11:17697: read: connection reset by peer"
Nov 24 13:18:28 crc kubenswrapper[5039]: I1124 13:18:28.697846 5039 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Nov 24 13:18:28 crc kubenswrapper[5039]: I1124 13:18:28.698062 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Nov 24 13:18:28 crc kubenswrapper[5039]: E1124 13:18:28.724322 5039 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e.scope\": RecentStats: unable to find data in memory cache]"
Nov 24 13:18:28 crc kubenswrapper[5039]: I1124 13:18:28.930073 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 24 13:18:28 crc kubenswrapper[5039]: I1124 13:18:28.938461 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.219391 5039 apiserver.go:52] "Watching apiserver"
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.226923 5039 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.227376 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-kube-controller-manager/kube-controller-manager-crc"]
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.228084 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.228147 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.228219 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.228278 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.228308 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.228807 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.229318 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.230496 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.230602 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.230522 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.231243 5039 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.234243 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.234465 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.234565 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.234634 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.234655 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.234961 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.235020 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.235035 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.257192 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.257374 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.257471 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.257615 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.257712 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.257803 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.257893 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.257983 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.259716 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.258386 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.258644 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.258985 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.259335 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.259651 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.259670 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.259553 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.259875 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.260128 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.260190 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.260244 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.260254 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.260295 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.260347 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.260406 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.260455 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.260540 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.260580 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.260599 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.260690 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.260749 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.260803 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.260854 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.260910 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.261043 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.261098 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.261157 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.261210 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.261310 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.261365 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.261415 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.261463 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.261562 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.261617 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.261669 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.261722 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.260620 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.261097 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.261884 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.261917 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.262351 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.262402 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.262350 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.262440 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.262670 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.262824 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.263044 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.263080 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.263168 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.263335 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.263617 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.263931 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.264010 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.264132 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.264180 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.264237 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.264288 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.264305 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.264352 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.264406 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.264439 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.264538 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.264461 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.264696 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.264757 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.264811 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.264863 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.264878 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.264905 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.264916 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.264950 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.265021 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.265032 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.265388 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.265437 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.265479 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.265552 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.265593 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.265628 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.265646 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.265663 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.267686 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.265663 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.265923 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.265993 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.266235 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.266946 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.267167 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.267958 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.267995 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.267280 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.267608 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.267693 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.268087 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.267839 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.268251 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.268284 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.268309 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.268319 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.268331 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.268404 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.268450 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.268491 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.268589 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.268738 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.268848 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.268891 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.268925 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.268959 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.268994 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269030 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269064 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269099 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269131 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269165 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269205 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269254 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269284 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269319 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269357 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269391 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269426 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269462 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269497 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod
\"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269602 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269645 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269682 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269718 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269753 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269791 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269827 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269863 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269897 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269937 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: 
\"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269976 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270011 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270049 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270083 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270068 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270120 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270242 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270281 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270315 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270356 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270390 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270426 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: 
\"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270458 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270491 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270575 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270610 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270647 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270680 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270714 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270749 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270788 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270824 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: 
\"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270858 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270892 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270931 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270996 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271029 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271065 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271098 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271132 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271168 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271207 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: 
\"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271249 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271288 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271322 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271358 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271393 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271428 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271461 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271497 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271584 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271620 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: 
\"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271655 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271693 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271728 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271765 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271840 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271877 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271914 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271951 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271986 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272022 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272059 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272094 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272132 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272175 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272209 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272244 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272289 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272327 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272363 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272399 5039 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272433 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272470 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272542 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272588 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272630 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272671 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272709 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272745 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272780 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272817 5039 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272853 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272890 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272927 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272962 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.272997 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273032 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273067 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273104 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273139 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273177 5039 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273215 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273282 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273317 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273354 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273390 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273428 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273468 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273540 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273590 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 24 13:18:29 crc kubenswrapper[5039]: 
I1124 13:18:29.273630 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273667 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273706 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273741 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273777 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273814 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273850 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273889 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273926 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273993 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274050 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274091 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274133 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274174 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274212 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274249 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274287 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274327 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274371 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274414 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274458 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274498 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274618 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274686 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274710 5039 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274734 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274756 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274781 5039 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274807 5039 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node 
\"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274834 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274857 5039 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274884 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274907 5039 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274931 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274953 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274974 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274995 5039 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275017 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275038 5039 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275059 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275093 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275115 5039 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275135 5039 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275158 5039 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275185 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275212 5039 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275238 5039 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275260 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275280 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275300 5039 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275322 5039 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275342 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275364 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275385 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275406 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: 
\"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275427 5039 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275448 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275473 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275496 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275562 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275590 5039 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275613 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275635 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275655 5039 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275678 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275701 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275723 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275744 5039 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275765 5039 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275786 5039 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269447 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.269880 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.270004 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.279647 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271242 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271409 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271597 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.271769 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273056 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.273491 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274204 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274581 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274849 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274910 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274925 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.274941 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275098 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275149 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275259 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275342 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275455 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.275835 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.276254 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.277325 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.277474 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.277576 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.277639 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.277984 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.278139 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.278554 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.278564 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.278598 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.278617 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.278686 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.279428 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.279643 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.280034 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.281685 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.281742 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.281754 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.281778 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.281798 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.282080 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.282459 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.283124 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.283441 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.283467 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.283494 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.283549 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.283641 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.283701 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.283726 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.284109 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.284320 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.284346 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.284661 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.285376 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.285390 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.285724 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). 
InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.285755 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.285397 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.286118 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:18:29.786067162 +0000 UTC m=+22.225191722 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.286258 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.286269 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.286946 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.287364 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.287395 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.288097 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.288290 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.289390 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.289450 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.289606 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.289609 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.288346 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.288805 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.289173 5039 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.290149 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:29.790115299 +0000 UTC m=+22.229239879 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.290147 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.289165 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.290183 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.290194 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.290232 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.290288 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.290727 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.290782 5039 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.290833 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.290871 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:29.790837128 +0000 UTC m=+22.229961668 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.290883 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.290893 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.291882 5039 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.292135 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.290784 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.292815 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.292847 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.293014 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.293312 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.293990 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.294569 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.294772 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.296187 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.298004 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.298692 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.299034 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.299256 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.297157 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.306147 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.306645 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.309127 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.309179 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.309201 5039 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.309345 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:29.809313985 +0000 UTC m=+22.248438685 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.309718 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.310752 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.310794 5039 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.310864 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:29.810844155 +0000 UTC m=+22.249968655 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.311871 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.312282 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.313677 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.313798 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.313825 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.314209 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.315788 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.316388 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.316670 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.316966 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.317028 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.317140 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.317370 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.317487 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.317578 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.317941 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.318099 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.318517 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.318559 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.318559 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.318866 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.319227 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.319582 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.320069 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.320207 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.320588 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.321008 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.321200 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.321482 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.321619 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.322128 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.322778 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.322921 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.322976 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.323701 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.326402 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.326545 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.326596 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.326617 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.326675 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.326874 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.327232 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.327236 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.327783 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.327986 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.328148 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.328175 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.328662 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.328755 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.329118 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.329381 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.330048 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.330238 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.346180 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.355263 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.356990 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.365005 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.367006 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.369232 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379469 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379635 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379691 5039 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node 
\"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379706 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379718 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379730 5039 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379741 5039 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379752 5039 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379763 5039 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379773 5039 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379784 5039 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379794 5039 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379805 5039 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379819 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379831 5039 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379843 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 
13:18:29.379855 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379866 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379878 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379890 5039 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379902 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379915 5039 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379927 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379939 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379951 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379965 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379977 5039 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.379988 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380000 5039 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: 
\"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380012 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380023 5039 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380051 5039 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380063 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380075 5039 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380088 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380100 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380113 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380125 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380137 5039 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380173 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380198 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 
13:18:29.380241 5039 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380257 5039 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380271 5039 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380286 5039 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380301 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380315 5039 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380333 5039 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380349 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380363 5039 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380379 5039 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380392 5039 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380406 5039 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380421 5039 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380436 5039 
reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380448 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380462 5039 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380477 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380537 5039 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380554 5039 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380570 5039 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380584 5039 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380599 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380613 5039 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380618 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380627 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380669 5039 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node 
\"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380684 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380698 5039 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380712 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380725 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380737 5039 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380748 5039 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380760 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380771 5039 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380784 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380796 5039 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380809 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380822 5039 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380834 5039 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" 
DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380846 5039 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380856 5039 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380868 5039 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380882 5039 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380903 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380930 5039 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380949 5039 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380964 5039 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380977 5039 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.380990 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381002 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381020 5039 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381034 5039 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381048 5039 
reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381062 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381075 5039 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381087 5039 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381100 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381115 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381128 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381144 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381158 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381176 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381189 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381202 5039 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381215 5039 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: 
I1124 13:18:29.381227 5039 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381240 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381253 5039 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381266 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381328 5039 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381341 5039 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381354 5039 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381367 5039 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381378 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381392 5039 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381405 5039 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381419 5039 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381435 5039 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc 
kubenswrapper[5039]: I1124 13:18:29.381449 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381464 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381477 5039 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381491 5039 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381536 5039 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381551 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381564 5039 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381577 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381590 5039 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381602 5039 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381618 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381633 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381645 5039 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381658 5039 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" 
(UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381671 5039 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381683 5039 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381695 5039 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381709 5039 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381722 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381735 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381750 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381765 5039 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381780 5039 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381796 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381812 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381825 5039 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381840 5039 reconciler_common.go:293] "Volume detached for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381854 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381868 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381881 5039 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381910 5039 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.381927 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.382947 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-de
v@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.422678 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.422962 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.424451 5039 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e" exitCode=255 Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.424646 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e"} Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.431355 5039 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.437399 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.437747 5039 scope.go:117] "RemoveContainer" containerID="42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.447796 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.463435 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.480470 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24
T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.494097 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.503461 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.516718 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.527326 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.537407 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.546833 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.551141 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.563033 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.570700 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 13:18:29 crc kubenswrapper[5039]: W1124 13:18:29.602821 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-8ccdfdd87735a2d0ab590f27ba9406e095f9f083b5a6ac2c9e9b474084d962ea WatchSource:0}: Error finding container 8ccdfdd87735a2d0ab590f27ba9406e095f9f083b5a6ac2c9e9b474084d962ea: Status 404 returned error can't find the container with id 8ccdfdd87735a2d0ab590f27ba9406e095f9f083b5a6ac2c9e9b474084d962ea Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.788054 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.788236 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:18:30.788209418 +0000 UTC m=+23.227333978 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.888687 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.888745 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.888776 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:18:29 crc kubenswrapper[5039]: I1124 13:18:29.888807 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.888842 5039 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.888862 5039 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.888912 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:30.888896408 +0000 UTC m=+23.328020908 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.888938 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:30.888919248 +0000 UTC m=+23.328043748 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.888941 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.888966 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.888982 5039 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.889037 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:30.889021981 +0000 UTC m=+23.328146541 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.889133 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.889202 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.889220 5039 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:29 crc kubenswrapper[5039]: E1124 13:18:29.889308 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:30.889284108 +0000 UTC m=+23.328408608 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.311098 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.311754 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.313260 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.314384 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.316045 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.316934 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" 
path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.317910 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.319436 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.320666 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.322549 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.323656 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.325346 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.326651 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.327748 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.328840 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.330028 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.331249 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.332128 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.333597 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.334414 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.334990 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.335620 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.336042 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.336668 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.337049 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.337623 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.338245 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.338715 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.339355 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.339926 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.340523 5039 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.340646 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.342254 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.342841 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" 
path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.348230 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.350164 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.350957 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.351989 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.352760 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.353908 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.354338 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.355315 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.356051 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.357204 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.357768 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.358850 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.359394 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.360550 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" 
path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.361083 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.362012 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.362560 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.363200 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.364396 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.364888 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.432006 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.434572 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2"} Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.436239 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"8ccdfdd87735a2d0ab590f27ba9406e095f9f083b5a6ac2c9e9b474084d962ea"} Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.439669 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea"} Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.439734 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7"} Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.439762 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"a09788b06c91040a121c36132ae3336d117efd9eafabd22e3bdde46ac23991e9"} Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.441319 5039 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205"} Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.441361 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"2c2add2a0ead82e97698158ff74636d7773b1ce78ffa759795df60de71e3eace"} Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.457154 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.472391 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.486543 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.496854 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.510413 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"
running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.526411 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.539448 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.557904 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.573339 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.592339 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.607414 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.621120 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.635033 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.648781 5039 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.666078 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.682674 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.795965 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:18:30 crc kubenswrapper[5039]: E1124 13:18:30.796247 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:18:32.796211766 +0000 UTC m=+25.235336326 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.896824 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.896881 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.896901 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:18:30 crc kubenswrapper[5039]: I1124 13:18:30.896918 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:18:30 crc kubenswrapper[5039]: E1124 13:18:30.897009 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 13:18:30 crc kubenswrapper[5039]: E1124 13:18:30.897024 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 13:18:30 crc kubenswrapper[5039]: E1124 13:18:30.897034 5039 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:30 crc kubenswrapper[5039]: E1124 13:18:30.897104 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:32.897087281 +0000 UTC m=+25.336211771 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:30 crc kubenswrapper[5039]: E1124 13:18:30.897126 5039 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 13:18:30 crc kubenswrapper[5039]: E1124 13:18:30.897247 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:32.897217524 +0000 UTC m=+25.336342064 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 13:18:30 crc kubenswrapper[5039]: E1124 13:18:30.897242 5039 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 13:18:30 crc kubenswrapper[5039]: E1124 13:18:30.897371 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:32.897342557 +0000 UTC m=+25.336467097 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 13:18:30 crc kubenswrapper[5039]: E1124 13:18:30.897258 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 13:18:30 crc kubenswrapper[5039]: E1124 13:18:30.897469 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 13:18:30 crc kubenswrapper[5039]: E1124 13:18:30.897495 5039 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:30 crc kubenswrapper[5039]: E1124 13:18:30.897597 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2025-11-24 13:18:32.897576253 +0000 UTC m=+25.336700793 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:31 crc kubenswrapper[5039]: I1124 13:18:31.306217 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:18:31 crc kubenswrapper[5039]: E1124 13:18:31.306320 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:18:31 crc kubenswrapper[5039]: I1124 13:18:31.306326 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:31 crc kubenswrapper[5039]: I1124 13:18:31.306232 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:18:31 crc kubenswrapper[5039]: E1124 13:18:31.306592 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:18:31 crc kubenswrapper[5039]: E1124 13:18:31.306733 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:18:31 crc kubenswrapper[5039]: I1124 13:18:31.443852 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 13:18:32 crc kubenswrapper[5039]: I1124 13:18:32.448849 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044"} Nov 24 13:18:32 crc kubenswrapper[5039]: I1124 13:18:32.464083 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:32Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:32 crc kubenswrapper[5039]: I1124 13:18:32.482132 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:32Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:32 crc kubenswrapper[5039]: I1124 13:18:32.496528 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:32Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:32 crc kubenswrapper[5039]: I1124 13:18:32.508509 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:32Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:32 crc kubenswrapper[5039]: I1124 13:18:32.523816 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:32Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:32 crc kubenswrapper[5039]: I1124 13:18:32.536156 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:32Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:32 crc kubenswrapper[5039]: I1124 13:18:32.556979 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:32Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:32 crc kubenswrapper[5039]: I1124 13:18:32.571388 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:32Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:32 crc kubenswrapper[5039]: I1124 13:18:32.817169 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:18:32 crc kubenswrapper[5039]: E1124 13:18:32.817387 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:18:36.817359146 +0000 UTC m=+29.256483646 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:18:32 crc kubenswrapper[5039]: I1124 13:18:32.918607 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:18:32 crc kubenswrapper[5039]: I1124 13:18:32.918683 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:32 crc kubenswrapper[5039]: I1124 13:18:32.918725 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:32 crc kubenswrapper[5039]: I1124 13:18:32.918764 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:18:32 crc kubenswrapper[5039]: E1124 13:18:32.918908 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 13:18:32 crc kubenswrapper[5039]: E1124 13:18:32.918952 5039 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 13:18:32 crc kubenswrapper[5039]: E1124 13:18:32.918970 5039 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 13:18:32 crc kubenswrapper[5039]: E1124 13:18:32.918994 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 13:18:32 crc kubenswrapper[5039]: E1124 13:18:32.919064 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 13:18:32 crc kubenswrapper[5039]: E1124 13:18:32.918960 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: 
object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 13:18:32 crc kubenswrapper[5039]: E1124 13:18:32.919082 5039 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:32 crc kubenswrapper[5039]: E1124 13:18:32.919088 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:36.919051253 +0000 UTC m=+29.358175813 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 13:18:32 crc kubenswrapper[5039]: E1124 13:18:32.919104 5039 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:32 crc kubenswrapper[5039]: E1124 13:18:32.919134 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:36.919114645 +0000 UTC m=+29.358239185 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 13:18:32 crc kubenswrapper[5039]: E1124 13:18:32.919167 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:36.919150846 +0000 UTC m=+29.358275496 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:32 crc kubenswrapper[5039]: E1124 13:18:32.919246 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:36.919219987 +0000 UTC m=+29.358344527 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.305803 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.305881 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.305803 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:33 crc kubenswrapper[5039]: E1124 13:18:33.305925 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:18:33 crc kubenswrapper[5039]: E1124 13:18:33.306017 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:18:33 crc kubenswrapper[5039]: E1124 13:18:33.306119 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.381649 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-k79vj"] Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.381940 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-k79vj" Nov 24 13:18:33 crc kubenswrapper[5039]: W1124 13:18:33.384040 5039 reflector.go:561] object-"openshift-dns"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-dns": no relationship found between node 'crc' and this object Nov 24 13:18:33 crc kubenswrapper[5039]: E1124 13:18:33.384082 5039 reflector.go:158] "Unhandled Error" err="object-\"openshift-dns\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-dns\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 24 13:18:33 crc kubenswrapper[5039]: W1124 13:18:33.385786 5039 reflector.go:561] object-"openshift-dns"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-dns": no relationship found between node 'crc' and this object Nov 24 13:18:33 crc kubenswrapper[5039]: E1124 13:18:33.385809 5039 reflector.go:158] "Unhandled Error" err="object-\"openshift-dns\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-dns\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.386195 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.409263 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:33Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.440345 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:33Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.457372 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:33Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.472099 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:33Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.491614 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025
-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:33Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.516764 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:33Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.522684 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/8413bf0a-e541-473a-ae4a-155c6f91b570-hosts-file\") pod \"node-resolver-k79vj\" (UID: \"8413bf0a-e541-473a-ae4a-155c6f91b570\") " pod="openshift-dns/node-resolver-k79vj" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.522794 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whj5m\" (UniqueName: \"kubernetes.io/projected/8413bf0a-e541-473a-ae4a-155c6f91b570-kube-api-access-whj5m\") pod \"node-resolver-k79vj\" (UID: \"8413bf0a-e541-473a-ae4a-155c6f91b570\") " pod="openshift-dns/node-resolver-k79vj" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.533149 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:33Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.560866 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:33Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.586193 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:33Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.623587 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/8413bf0a-e541-473a-ae4a-155c6f91b570-hosts-file\") pod \"node-resolver-k79vj\" (UID: \"8413bf0a-e541-473a-ae4a-155c6f91b570\") " pod="openshift-dns/node-resolver-k79vj" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.623658 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whj5m\" (UniqueName: \"kubernetes.io/projected/8413bf0a-e541-473a-ae4a-155c6f91b570-kube-api-access-whj5m\") pod \"node-resolver-k79vj\" (UID: \"8413bf0a-e541-473a-ae4a-155c6f91b570\") " pod="openshift-dns/node-resolver-k79vj" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.623785 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/8413bf0a-e541-473a-ae4a-155c6f91b570-hosts-file\") pod \"node-resolver-k79vj\" (UID: \"8413bf0a-e541-473a-ae4a-155c6f91b570\") " pod="openshift-dns/node-resolver-k79vj" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.863636 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-8x5rg"] Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.864015 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.867979 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.868409 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.868626 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-kr94g"] Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.869571 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-kr94g" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.875596 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-q77mz"] Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.875925 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.876109 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.876340 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.876398 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.876444 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.876489 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.876556 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.876627 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.877050 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.878820 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.879029 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.893994 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:33Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.904878 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:33Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.917199 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:33Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.931877 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:33Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.941051 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:33Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.952390 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:33Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.967446 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:33Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:33 crc kubenswrapper[5039]: I1124 13:18:33.980846 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:33Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.000469 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:33Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.024579 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.026777 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-host-var-lib-kubelet\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.026810 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/6c18c830-d513-4df0-be92-cd44f2d2c5df-multus-daemon-config\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.026826 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ce86b4cd-2cb0-4cec-8b42-22a855734a60-mcd-auth-proxy-config\") pod \"machine-config-daemon-8x5rg\" (UID: \"ce86b4cd-2cb0-4cec-8b42-22a855734a60\") " pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.026845 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/2c35ff00-6898-4235-af87-d46e63a20111-os-release\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.026858 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/2c35ff00-6898-4235-af87-d46e63a20111-cnibin\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.026872 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/2c35ff00-6898-4235-af87-d46e63a20111-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " 
pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.026954 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-host-run-multus-certs\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027090 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/ce86b4cd-2cb0-4cec-8b42-22a855734a60-rootfs\") pod \"machine-config-daemon-8x5rg\" (UID: \"ce86b4cd-2cb0-4cec-8b42-22a855734a60\") " pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027153 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkv7g\" (UniqueName: \"kubernetes.io/projected/6c18c830-d513-4df0-be92-cd44f2d2c5df-kube-api-access-mkv7g\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027188 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/2c35ff00-6898-4235-af87-d46e63a20111-tuning-conf-dir\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027217 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ce86b4cd-2cb0-4cec-8b42-22a855734a60-proxy-tls\") pod \"machine-config-daemon-8x5rg\" (UID: \"ce86b4cd-2cb0-4cec-8b42-22a855734a60\") " pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027244 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2rcz\" (UniqueName: \"kubernetes.io/projected/ce86b4cd-2cb0-4cec-8b42-22a855734a60-kube-api-access-q2rcz\") pod \"machine-config-daemon-8x5rg\" (UID: \"ce86b4cd-2cb0-4cec-8b42-22a855734a60\") " pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027273 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2c35ff00-6898-4235-af87-d46e63a20111-system-cni-dir\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027296 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-system-cni-dir\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027332 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-host-var-lib-cni-multus\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027371 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-cnibin\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027399 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srmdr\" (UniqueName: \"kubernetes.io/projected/2c35ff00-6898-4235-af87-d46e63a20111-kube-api-access-srmdr\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027432 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-multus-socket-dir-parent\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027452 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-host-var-lib-cni-bin\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027472 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-multus-conf-dir\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027496 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/2c35ff00-6898-4235-af87-d46e63a20111-cni-binary-copy\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027545 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-etc-kubernetes\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027587 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-multus-cni-dir\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027614 5039 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/6c18c830-d513-4df0-be92-cd44f2d2c5df-cni-binary-copy\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027640 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-os-release\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027661 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-host-run-k8s-cni-cncf-io\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027682 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-host-run-netns\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.027712 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-hostroot\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.043513 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.074099 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.097337 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.121241 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec
8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.128983 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-os-release\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129022 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-host-run-k8s-cni-cncf-io\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129038 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-host-run-netns\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129054 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-hostroot\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129076 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-host-var-lib-kubelet\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129094 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/6c18c830-d513-4df0-be92-cd44f2d2c5df-multus-daemon-config\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129116 5039 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ce86b4cd-2cb0-4cec-8b42-22a855734a60-mcd-auth-proxy-config\") pod \"machine-config-daemon-8x5rg\" (UID: \"ce86b4cd-2cb0-4cec-8b42-22a855734a60\") " pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129140 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/2c35ff00-6898-4235-af87-d46e63a20111-os-release\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129152 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-host-var-lib-kubelet\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129161 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/ce86b4cd-2cb0-4cec-8b42-22a855734a60-rootfs\") pod \"machine-config-daemon-8x5rg\" (UID: \"ce86b4cd-2cb0-4cec-8b42-22a855734a60\") " pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129216 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/2c35ff00-6898-4235-af87-d46e63a20111-cnibin\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129187 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/ce86b4cd-2cb0-4cec-8b42-22a855734a60-rootfs\") pod \"machine-config-daemon-8x5rg\" (UID: \"ce86b4cd-2cb0-4cec-8b42-22a855734a60\") " pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129245 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/2c35ff00-6898-4235-af87-d46e63a20111-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129272 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-host-run-multus-certs\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129181 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-host-run-netns\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129295 5039 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-mkv7g\" (UniqueName: \"kubernetes.io/projected/6c18c830-d513-4df0-be92-cd44f2d2c5df-kube-api-access-mkv7g\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129299 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/2c35ff00-6898-4235-af87-d46e63a20111-cnibin\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129138 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-hostroot\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129335 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/2c35ff00-6898-4235-af87-d46e63a20111-os-release\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129353 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-host-run-k8s-cni-cncf-io\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129389 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-host-run-multus-certs\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129397 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/2c35ff00-6898-4235-af87-d46e63a20111-tuning-conf-dir\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129493 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ce86b4cd-2cb0-4cec-8b42-22a855734a60-proxy-tls\") pod \"machine-config-daemon-8x5rg\" (UID: \"ce86b4cd-2cb0-4cec-8b42-22a855734a60\") " pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129539 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2rcz\" (UniqueName: \"kubernetes.io/projected/ce86b4cd-2cb0-4cec-8b42-22a855734a60-kube-api-access-q2rcz\") pod \"machine-config-daemon-8x5rg\" (UID: \"ce86b4cd-2cb0-4cec-8b42-22a855734a60\") " pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129566 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" 
(UniqueName: \"kubernetes.io/host-path/2c35ff00-6898-4235-af87-d46e63a20111-system-cni-dir\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129590 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-system-cni-dir\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129657 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-host-var-lib-cni-multus\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129663 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2c35ff00-6898-4235-af87-d46e63a20111-system-cni-dir\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129684 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-cnibin\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129716 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srmdr\" (UniqueName: \"kubernetes.io/projected/2c35ff00-6898-4235-af87-d46e63a20111-kube-api-access-srmdr\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129730 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-host-var-lib-cni-multus\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129759 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-multus-socket-dir-parent\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129768 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-cnibin\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129785 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-host-var-lib-cni-bin\") pod \"multus-kr94g\" 
(UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129807 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-multus-conf-dir\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129813 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-host-var-lib-cni-bin\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129829 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/2c35ff00-6898-4235-af87-d46e63a20111-cni-binary-copy\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129782 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-system-cni-dir\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129848 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/6c18c830-d513-4df0-be92-cd44f2d2c5df-cni-binary-copy\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129870 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-multus-socket-dir-parent\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129874 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/6c18c830-d513-4df0-be92-cd44f2d2c5df-multus-daemon-config\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129885 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-multus-conf-dir\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129931 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ce86b4cd-2cb0-4cec-8b42-22a855734a60-mcd-auth-proxy-config\") pod \"machine-config-daemon-8x5rg\" (UID: \"ce86b4cd-2cb0-4cec-8b42-22a855734a60\") " pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.129974 5039 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-etc-kubernetes\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.130009 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-multus-cni-dir\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.130059 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-etc-kubernetes\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.130075 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-multus-cni-dir\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.130149 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/2c35ff00-6898-4235-af87-d46e63a20111-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.130221 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/6c18c830-d513-4df0-be92-cd44f2d2c5df-os-release\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.130373 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/2c35ff00-6898-4235-af87-d46e63a20111-cni-binary-copy\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.130419 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/6c18c830-d513-4df0-be92-cd44f2d2c5df-cni-binary-copy\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.130535 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/2c35ff00-6898-4235-af87-d46e63a20111-tuning-conf-dir\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.139375 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ce86b4cd-2cb0-4cec-8b42-22a855734a60-proxy-tls\") pod 
\"machine-config-daemon-8x5rg\" (UID: \"ce86b4cd-2cb0-4cec-8b42-22a855734a60\") " pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.153365 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.153394 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2rcz\" (UniqueName: \"kubernetes.io/projected/ce86b4cd-2cb0-4cec-8b42-22a855734a60-kube-api-access-q2rcz\") pod \"machine-config-daemon-8x5rg\" (UID: \"ce86b4cd-2cb0-4cec-8b42-22a855734a60\") " pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.157186 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkv7g\" (UniqueName: \"kubernetes.io/projected/6c18c830-d513-4df0-be92-cd44f2d2c5df-kube-api-access-mkv7g\") pod \"multus-kr94g\" (UID: \"6c18c830-d513-4df0-be92-cd44f2d2c5df\") " pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.162552 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srmdr\" (UniqueName: 
\"kubernetes.io/projected/2c35ff00-6898-4235-af87-d46e63a20111-kube-api-access-srmdr\") pod \"multus-additional-cni-plugins-q77mz\" (UID: \"2c35ff00-6898-4235-af87-d46e63a20111\") " pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.178632 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.181509 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.188313 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-kr94g" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.192622 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.1
1\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.194763 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-q77mz" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.204573 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluste
r-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: W1124 13:18:34.211421 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c35ff00_6898_4235_af87_d46e63a20111.slice/crio-70180778f42efd126ec68defce2a3d7f430ae776dd1dbe977d2d0da2dad267b2 WatchSource:0}: Error finding container 70180778f42efd126ec68defce2a3d7f430ae776dd1dbe977d2d0da2dad267b2: Status 404 returned error can't find the container with id 70180778f42efd126ec68defce2a3d7f430ae776dd1dbe977d2d0da2dad267b2 Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.220365 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.235734 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.247743 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.271372 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.271964 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-w2ctb"] Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.272876 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.277251 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.277436 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.277572 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.277981 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.278103 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.278137 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.278347 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.292424 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.312309 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.326323 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.339257 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.351867 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.364866 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.386022 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.404301 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.412616 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.420659 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.432847 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-run-netns\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.432891 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-run-ovn-kubernetes\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.432925 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-cni-netd\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.432962 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhfzg\" (UniqueName: \"kubernetes.io/projected/54c05b03-6747-47bf-a40d-8a9332c4d856-kube-api-access-fhfzg\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.432985 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 
13:18:34.433016 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-kubelet\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.433035 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-node-log\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.433087 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-systemd-units\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.433108 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-cni-bin\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.433132 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/54c05b03-6747-47bf-a40d-8a9332c4d856-ovn-node-metrics-cert\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.433162 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-run-systemd\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.433187 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/54c05b03-6747-47bf-a40d-8a9332c4d856-ovnkube-script-lib\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.433213 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-var-lib-openvswitch\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.433278 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-log-socket\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.433308 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-run-openvswitch\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.433325 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/54c05b03-6747-47bf-a40d-8a9332c4d856-env-overrides\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.433399 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-run-ovn\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.433438 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/54c05b03-6747-47bf-a40d-8a9332c4d856-ovnkube-config\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.433560 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-slash\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.433639 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-etc-openvswitch\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.441487 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.455613 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kr94g" event={"ID":"6c18c830-d513-4df0-be92-cd44f2d2c5df","Type":"ContainerStarted","Data":"a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9"} Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.455661 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kr94g" event={"ID":"6c18c830-d513-4df0-be92-cd44f2d2c5df","Type":"ContainerStarted","Data":"624eeb360bc167f34ace3261862784dd1e0f04c807f27ddf6a411908a050366c"} Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.457643 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7"} Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.457688 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488"} Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.457700 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"6198227be6e02258ff4eaef20192f0034445848f60b4960c86dc991371a9d652"} Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.459165 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" event={"ID":"2c35ff00-6898-4235-af87-d46e63a20111","Type":"ContainerStarted","Data":"ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0"} Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.459219 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" event={"ID":"2c35ff00-6898-4235-af87-d46e63a20111","Type":"ContainerStarted","Data":"70180778f42efd126ec68defce2a3d7f430ae776dd1dbe977d2d0da2dad267b2"} Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.465748 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.478333 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.491492 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.503040 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.513726 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.523546 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.531851 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.534727 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-etc-openvswitch\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.534805 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-run-netns\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.534840 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-run-ovn-kubernetes\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.534868 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-cni-netd\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.534902 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhfzg\" (UniqueName: \"kubernetes.io/projected/54c05b03-6747-47bf-a40d-8a9332c4d856-kube-api-access-fhfzg\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.534928 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.534966 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-kubelet\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.534989 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-node-log\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.534995 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-etc-openvswitch\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.535023 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-systemd-units\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.535041 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-cni-bin\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.535061 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/54c05b03-6747-47bf-a40d-8a9332c4d856-ovn-node-metrics-cert\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.535104 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-run-systemd\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.535165 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/54c05b03-6747-47bf-a40d-8a9332c4d856-ovnkube-script-lib\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.535184 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-var-lib-openvswitch\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.535203 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-log-socket\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.535248 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-run-openvswitch\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.535270 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/54c05b03-6747-47bf-a40d-8a9332c4d856-env-overrides\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.535287 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-run-ovn\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.535317 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/54c05b03-6747-47bf-a40d-8a9332c4d856-ovnkube-config\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.535341 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-slash\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.535681 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-run-systemd\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.535764 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.535710 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-kubelet\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.535770 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-run-ovn-kubernetes\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.535848 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-node-log\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.534968 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-run-netns\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.535966 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-systemd-units\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.535712 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-cni-netd\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.536253 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-log-socket\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.536595 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-cni-bin\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.536620 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-run-openvswitch\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.536700 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-slash\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.537172 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/54c05b03-6747-47bf-a40d-8a9332c4d856-env-overrides\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.537257 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/54c05b03-6747-47bf-a40d-8a9332c4d856-ovnkube-config\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.537270 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-run-ovn\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.537306 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-var-lib-openvswitch\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.537344 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/54c05b03-6747-47bf-a40d-8a9332c4d856-ovnkube-script-lib\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.539446 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/54c05b03-6747-47bf-a40d-8a9332c4d856-ovn-node-metrics-cert\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.546157 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.555215 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhfzg\" (UniqueName: \"kubernetes.io/projected/54c05b03-6747-47bf-a40d-8a9332c4d856-kube-api-access-fhfzg\") pod \"ovnkube-node-w2ctb\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.563166 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.574717 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.587488 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.589073 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\
"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api
-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: W1124 13:18:34.606811 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod54c05b03_6747_47bf_a40d_8a9332c4d856.slice/crio-d3e18f52aa9a4413c8245a5f84d29d124d1211d07b991d1f411169fab8ac98c9 WatchSource:0}: Error finding container d3e18f52aa9a4413c8245a5f84d29d124d1211d07b991d1f411169fab8ac98c9: Status 404 returned error can't find the container with id d3e18f52aa9a4413c8245a5f84d29d124d1211d07b991d1f411169fab8ac98c9 Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.632262 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: E1124 13:18:34.636397 5039 projected.go:288] Couldn't get configMap openshift-dns/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 24 13:18:34 crc kubenswrapper[5039]: E1124 13:18:34.636462 5039 projected.go:194] Error preparing data for projected volume kube-api-access-whj5m for pod openshift-dns/node-resolver-k79vj: failed to sync configmap cache: timed out waiting for the condition Nov 24 13:18:34 crc kubenswrapper[5039]: E1124 13:18:34.636538 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8413bf0a-e541-473a-ae4a-155c6f91b570-kube-api-access-whj5m podName:8413bf0a-e541-473a-ae4a-155c6f91b570 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:35.136517983 +0000 UTC m=+27.575642483 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-whj5m" (UniqueName: "kubernetes.io/projected/8413bf0a-e541-473a-ae4a-155c6f91b570-kube-api-access-whj5m") pod "node-resolver-k79vj" (UID: "8413bf0a-e541-473a-ae4a-155c6f91b570") : failed to sync configmap cache: timed out waiting for the condition Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.657780 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e63
55e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.671226 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.683705 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.694958 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:34Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:34 crc kubenswrapper[5039]: I1124 13:18:34.756011 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.055058 5039 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.056914 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.056944 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.056954 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.057057 5039 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.065604 5039 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.065861 5039 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.066843 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.066867 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.066876 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.066888 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.066898 5039 setters.go:603] "Node became not ready" 
node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:35Z","lastTransitionTime":"2025-11-24T13:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:35 crc kubenswrapper[5039]: E1124 13:18:35.083220 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.087703 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.087742 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.087754 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.087772 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.087788 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:35Z","lastTransitionTime":"2025-11-24T13:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:35 crc kubenswrapper[5039]: E1124 13:18:35.102199 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.107680 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.107721 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.107734 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.107752 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.107766 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:35Z","lastTransitionTime":"2025-11-24T13:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:35 crc kubenswrapper[5039]: E1124 13:18:35.121050 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.124074 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.124115 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.124130 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.124150 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.124165 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:35Z","lastTransitionTime":"2025-11-24T13:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:35 crc kubenswrapper[5039]: E1124 13:18:35.138641 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.141971 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whj5m\" (UniqueName: \"kubernetes.io/projected/8413bf0a-e541-473a-ae4a-155c6f91b570-kube-api-access-whj5m\") pod \"node-resolver-k79vj\" (UID: 
\"8413bf0a-e541-473a-ae4a-155c6f91b570\") " pod="openshift-dns/node-resolver-k79vj" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.143119 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.143149 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.143161 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.143178 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.143190 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:35Z","lastTransitionTime":"2025-11-24T13:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.146153 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whj5m\" (UniqueName: \"kubernetes.io/projected/8413bf0a-e541-473a-ae4a-155c6f91b570-kube-api-access-whj5m\") pod \"node-resolver-k79vj\" (UID: \"8413bf0a-e541-473a-ae4a-155c6f91b570\") " pod="openshift-dns/node-resolver-k79vj" Nov 24 13:18:35 crc kubenswrapper[5039]: E1124 13:18:35.157300 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: E1124 13:18:35.157499 5039 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.159394 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.159439 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.159452 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.159472 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.159484 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:35Z","lastTransitionTime":"2025-11-24T13:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.195254 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-k79vj" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.261804 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.261855 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.261868 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.261886 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.261899 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:35Z","lastTransitionTime":"2025-11-24T13:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.305991 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.306063 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:18:35 crc kubenswrapper[5039]: E1124 13:18:35.306119 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.306146 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:18:35 crc kubenswrapper[5039]: E1124 13:18:35.306181 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:18:35 crc kubenswrapper[5039]: E1124 13:18:35.306231 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.364368 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.364751 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.364764 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.364785 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.364800 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:35Z","lastTransitionTime":"2025-11-24T13:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.464234 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-k79vj" event={"ID":"8413bf0a-e541-473a-ae4a-155c6f91b570","Type":"ContainerStarted","Data":"f3824ff829a43fca858632cf50553ee85accb6a1c4e02a2a6ab71e462ecd577a"} Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.465931 5039 generic.go:334] "Generic (PLEG): container finished" podID="2c35ff00-6898-4235-af87-d46e63a20111" containerID="ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0" exitCode=0 Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.465991 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" event={"ID":"2c35ff00-6898-4235-af87-d46e63a20111","Type":"ContainerDied","Data":"ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0"} Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.466236 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.466281 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.466298 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.466315 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.466325 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:35Z","lastTransitionTime":"2025-11-24T13:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.468071 5039 generic.go:334] "Generic (PLEG): container finished" podID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerID="23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c" exitCode=0 Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.468102 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerDied","Data":"23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c"} Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.468121 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerStarted","Data":"d3e18f52aa9a4413c8245a5f84d29d124d1211d07b991d1f411169fab8ac98c9"} Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.483062 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.497017 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c85
7df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.508424 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":
{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.525315 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\
\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.538285 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.553347 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.568132 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.568181 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.568191 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.568208 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.568219 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:35Z","lastTransitionTime":"2025-11-24T13:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.569916 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.582290 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.597879 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.610249 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.621487 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.633152 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{
\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.653064 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.665876 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.670163 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.670189 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.670197 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.670211 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.670220 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:35Z","lastTransitionTime":"2025-11-24T13:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.685789 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets
/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\
\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.699788 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v
4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.715652 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.729667 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.734488 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-dsj42"] Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.734857 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-dsj42" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.736581 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.736656 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.736661 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.736581 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.741991 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.754310 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.767338 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.772243 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.772301 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.772318 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.772343 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.772361 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:35Z","lastTransitionTime":"2025-11-24T13:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.782669 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.793777 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.805063 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.816640 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{
\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.836819 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z 
is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.848460 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/88879497-9ba4-4940-975d-d872f5fcccc9-host\") pod \"node-ca-dsj42\" (UID: \"88879497-9ba4-4940-975d-d872f5fcccc9\") " pod="openshift-image-registry/node-ca-dsj42" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.848564 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/88879497-9ba4-4940-975d-d872f5fcccc9-serviceca\") pod \"node-ca-dsj42\" (UID: \"88879497-9ba4-4940-975d-d872f5fcccc9\") " pod="openshift-image-registry/node-ca-dsj42" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.848598 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rz78s\" (UniqueName: \"kubernetes.io/projected/88879497-9ba4-4940-975d-d872f5fcccc9-kube-api-access-rz78s\") pod \"node-ca-dsj42\" (UID: \"88879497-9ba4-4940-975d-d872f5fcccc9\") " pod="openshift-image-registry/node-ca-dsj42" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.850954 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"
state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.862849 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.875140 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.875290 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.875329 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.875343 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.875364 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.875378 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:35Z","lastTransitionTime":"2025-11-24T13:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.891724 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running
\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.917695 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z 
is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.928411 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.942337 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.949577 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/88879497-9ba4-4940-975d-d872f5fcccc9-serviceca\") pod \"node-ca-dsj42\" (UID: \"88879497-9ba4-4940-975d-d872f5fcccc9\") " pod="openshift-image-registry/node-ca-dsj42" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.949617 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rz78s\" (UniqueName: \"kubernetes.io/projected/88879497-9ba4-4940-975d-d872f5fcccc9-kube-api-access-rz78s\") pod \"node-ca-dsj42\" (UID: \"88879497-9ba4-4940-975d-d872f5fcccc9\") " pod="openshift-image-registry/node-ca-dsj42" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.949656 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/88879497-9ba4-4940-975d-d872f5fcccc9-host\") pod \"node-ca-dsj42\" (UID: 
\"88879497-9ba4-4940-975d-d872f5fcccc9\") " pod="openshift-image-registry/node-ca-dsj42" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.949709 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/88879497-9ba4-4940-975d-d872f5fcccc9-host\") pod \"node-ca-dsj42\" (UID: \"88879497-9ba4-4940-975d-d872f5fcccc9\") " pod="openshift-image-registry/node-ca-dsj42" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.950638 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/88879497-9ba4-4940-975d-d872f5fcccc9-serviceca\") pod \"node-ca-dsj42\" (UID: \"88879497-9ba4-4940-975d-d872f5fcccc9\") " pod="openshift-image-registry/node-ca-dsj42" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.956913 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.967641 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rz78s\" (UniqueName: \"kubernetes.io/projected/88879497-9ba4-4940-975d-d872f5fcccc9-kube-api-access-rz78s\") pod \"node-ca-dsj42\" (UID: \"88879497-9ba4-4940-975d-d872f5fcccc9\") " pod="openshift-image-registry/node-ca-dsj42" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 
13:18:35.973053 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/ku
bernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.982466 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.982531 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.982544 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.982563 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.982574 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:35Z","lastTransitionTime":"2025-11-24T13:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:35 crc kubenswrapper[5039]: I1124 13:18:35.986802 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:35Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.001872 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.013567 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.025989 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.039836 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.085027 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.085062 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.085071 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.085086 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.085095 5039 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:36Z","lastTransitionTime":"2025-11-24T13:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.152256 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-dsj42" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.187321 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.187370 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.187378 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.187391 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.187399 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:36Z","lastTransitionTime":"2025-11-24T13:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.290764 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.290805 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.290815 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.290830 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.290840 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:36Z","lastTransitionTime":"2025-11-24T13:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.392930 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.392971 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.392982 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.392999 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.393009 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:36Z","lastTransitionTime":"2025-11-24T13:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.472687 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-k79vj" event={"ID":"8413bf0a-e541-473a-ae4a-155c6f91b570","Type":"ContainerStarted","Data":"07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e"} Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.475931 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerStarted","Data":"86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224"} Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.475975 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerStarted","Data":"95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90"} Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.475999 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerStarted","Data":"045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614"} Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.476008 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerStarted","Data":"7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a"} Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.476017 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerStarted","Data":"2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad"} Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.476025 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerStarted","Data":"d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb"} Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.476996 5039 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-image-registry/node-ca-dsj42" event={"ID":"88879497-9ba4-4940-975d-d872f5fcccc9","Type":"ContainerStarted","Data":"fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18"} Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.477023 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-dsj42" event={"ID":"88879497-9ba4-4940-975d-d872f5fcccc9","Type":"ContainerStarted","Data":"a0c07c74c150d62adcfb68795bd832c4e96de2bd6c5746ae88153b1b14b407b9"} Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.478793 5039 generic.go:334] "Generic (PLEG): container finished" podID="2c35ff00-6898-4235-af87-d46e63a20111" containerID="9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1" exitCode=0 Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.478817 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" event={"ID":"2c35ff00-6898-4235-af87-d46e63a20111","Type":"ContainerDied","Data":"9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1"} Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.484584 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v
4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.494799 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.494862 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.494885 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.494914 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.494937 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:36Z","lastTransitionTime":"2025-11-24T13:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.506408 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16
c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.516160 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"19
2.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.530882 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.546837 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.563073 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.575335 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.592080 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.597027 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.597059 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.597071 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.597085 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.597093 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:36Z","lastTransitionTime":"2025-11-24T13:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.603032 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.615116 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-oper
ator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.628189 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.639445 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.651244 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.663994 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.674988 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.685235 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.699390 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.699430 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.699442 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.699459 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.699470 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:36Z","lastTransitionTime":"2025-11-24T13:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.714398 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"o
vnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.766466 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z 
is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.792197 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.802132 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.802162 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.802172 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.802184 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.802194 5039 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:36Z","lastTransitionTime":"2025-11-24T13:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.834221 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.861904 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:18:36 crc kubenswrapper[5039]: E1124 13:18:36.862077 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:18:44.862046213 +0000 UTC m=+37.301170723 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.878025 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.904078 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.904129 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.904143 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.904163 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.904175 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:36Z","lastTransitionTime":"2025-11-24T13:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.918662 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\
\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.955467 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.963106 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.963173 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.963218 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.963257 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:36 crc kubenswrapper[5039]: E1124 13:18:36.963387 5039 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 13:18:36 crc kubenswrapper[5039]: E1124 13:18:36.963460 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:44.963439132 +0000 UTC m=+37.402563652 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 13:18:36 crc kubenswrapper[5039]: E1124 13:18:36.963754 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 13:18:36 crc kubenswrapper[5039]: E1124 13:18:36.963885 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 13:18:36 crc kubenswrapper[5039]: E1124 13:18:36.964025 5039 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:36 crc kubenswrapper[5039]: E1124 13:18:36.964192 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:44.96417346 +0000 UTC m=+37.403297970 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:36 crc kubenswrapper[5039]: E1124 13:18:36.963772 5039 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 13:18:36 crc kubenswrapper[5039]: E1124 13:18:36.964466 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:44.964449038 +0000 UTC m=+37.403573548 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 13:18:36 crc kubenswrapper[5039]: E1124 13:18:36.963814 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 13:18:36 crc kubenswrapper[5039]: E1124 13:18:36.964783 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 13:18:36 crc kubenswrapper[5039]: E1124 13:18:36.964922 5039 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:36 crc kubenswrapper[5039]: E1124 13:18:36.965063 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:44.965049794 +0000 UTC m=+37.404174304 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:36 crc kubenswrapper[5039]: I1124 13:18:36.994126 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:36Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.005829 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.005860 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.005869 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.005882 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.005893 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:37Z","lastTransitionTime":"2025-11-24T13:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.035269 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.076853 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-oper
ator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.108754 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.108801 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.108814 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.108833 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.108845 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:37Z","lastTransitionTime":"2025-11-24T13:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.123608 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.158620 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.210988 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.211043 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.211058 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.211078 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.211094 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:37Z","lastTransitionTime":"2025-11-24T13:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.306628 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.306650 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:18:37 crc kubenswrapper[5039]: E1124 13:18:37.306861 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.306677 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:18:37 crc kubenswrapper[5039]: E1124 13:18:37.307035 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:18:37 crc kubenswrapper[5039]: E1124 13:18:37.307084 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.314212 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.314259 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.314271 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.314299 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.314312 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:37Z","lastTransitionTime":"2025-11-24T13:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.417015 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.417093 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.417107 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.417132 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.417151 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:37Z","lastTransitionTime":"2025-11-24T13:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.486832 5039 generic.go:334] "Generic (PLEG): container finished" podID="2c35ff00-6898-4235-af87-d46e63a20111" containerID="3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec" exitCode=0 Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.486966 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" event={"ID":"2c35ff00-6898-4235-af87-d46e63a20111","Type":"ContainerDied","Data":"3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec"} Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.520010 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.520340 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.519970 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node 
kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni
/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not 
yet valid: current time 2025-11-24T13:18:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.520361 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.520577 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.520596 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:37Z","lastTransitionTime":"2025-11-24T13:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.533034 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:37Z is after 
2025-08-24T17:21:41Z" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.547861 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.560547 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.577859 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serv
iceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.587299 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.1
68.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.599624 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.614357 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.623032 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.623179 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.623280 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.623383 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.623481 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:37Z","lastTransitionTime":"2025-11-24T13:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.626492 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.640094 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.660400 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.678438 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.691559 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.717367 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.726265 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.726310 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.726322 5039 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.726337 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:37 crc kubenswrapper[5039]: I1124 13:18:37.726349 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:37Z","lastTransitionTime":"2025-11-24T13:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.239294 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.239343 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.239355 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.239373 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.239385 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:38Z","lastTransitionTime":"2025-11-24T13:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.328551 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.342695 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.342769 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.342791 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.342821 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.342847 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:38Z","lastTransitionTime":"2025-11-24T13:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.347421 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},
{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.358214 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.378318 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.394435 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z"
Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.417876 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted.
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.430679 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.446203 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.446267 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.446286 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.446318 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.446342 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:38Z","lastTransitionTime":"2025-11-24T13:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.491907 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.492285 5039 generic.go:334] "Generic (PLEG): container finished" podID="2c35ff00-6898-4235-af87-d46e63a20111" containerID="c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d" exitCode=0 Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.492312 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" event={"ID":"2c35ff00-6898-4235-af87-d46e63a20111","Type":"ContainerDied","Data":"c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d"} Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.502624 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.519004 5039 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.532879 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47
ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.546459 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.548574 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.548632 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.548648 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.548665 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.548677 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:38Z","lastTransitionTime":"2025-11-24T13:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.557241 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.566487 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.578490 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.598471 5039 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.612671 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.622567 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\
"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.633887 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.650607 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.650647 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.650658 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.650673 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.650682 5039 setters.go:603] "Node became 
not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:38Z","lastTransitionTime":"2025-11-24T13:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.676040 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift
-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.713548 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.752521 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.752571 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.752584 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.752602 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.752615 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:38Z","lastTransitionTime":"2025-11-24T13:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.753535 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.794523 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.834593 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.854892 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.854930 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.854940 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.854957 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.854969 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:38Z","lastTransitionTime":"2025-11-24T13:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.957889 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.957939 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.957956 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.957977 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:38 crc kubenswrapper[5039]: I1124 13:18:38.957992 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:38Z","lastTransitionTime":"2025-11-24T13:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.060721 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.060761 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.060771 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.060786 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.060796 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:39Z","lastTransitionTime":"2025-11-24T13:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.163658 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.163719 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.163731 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.163745 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.163757 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:39Z","lastTransitionTime":"2025-11-24T13:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.266867 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.266958 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.267008 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.267034 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.267050 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:39Z","lastTransitionTime":"2025-11-24T13:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.305754 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.305798 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:39 crc kubenswrapper[5039]: E1124 13:18:39.305905 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.305983 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:18:39 crc kubenswrapper[5039]: E1124 13:18:39.306042 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:18:39 crc kubenswrapper[5039]: E1124 13:18:39.306180 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.370338 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.370370 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.370380 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.370393 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.370401 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:39Z","lastTransitionTime":"2025-11-24T13:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.474035 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.474075 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.474087 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.474117 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.474130 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:39Z","lastTransitionTime":"2025-11-24T13:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.501933 5039 generic.go:334] "Generic (PLEG): container finished" podID="2c35ff00-6898-4235-af87-d46e63a20111" containerID="532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad" exitCode=0 Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.502293 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" event={"ID":"2c35ff00-6898-4235-af87-d46e63a20111","Type":"ContainerDied","Data":"532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad"} Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.509057 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerStarted","Data":"7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288"} Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.531054 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:39Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.546929 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mo
untPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:39Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.556338 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:39Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.572679 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:39Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.576161 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.576200 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.576211 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.576228 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.576243 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:39Z","lastTransitionTime":"2025-11-24T13:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.585831 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:39Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.597457 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:39Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.611092 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:39Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.624006 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:39Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.639371 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:39Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.652741 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:39Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.665810 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:39Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.679375 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.679646 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.679663 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.679676 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.679685 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:39Z","lastTransitionTime":"2025-11-24T13:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.714982 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:39Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:39 crc 
kubenswrapper[5039]: I1124 13:18:39.727685 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-24T13:18:39Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.748867 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",
\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\
\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\
\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:39Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.782653 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.782695 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.782704 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.782719 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.782730 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:39Z","lastTransitionTime":"2025-11-24T13:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.885860 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.885900 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.885912 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.885929 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.885941 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:39Z","lastTransitionTime":"2025-11-24T13:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.989705 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.989951 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.990030 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.990123 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:39 crc kubenswrapper[5039]: I1124 13:18:39.990201 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:39Z","lastTransitionTime":"2025-11-24T13:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.099439 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.099498 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.099570 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.099600 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.099620 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:40Z","lastTransitionTime":"2025-11-24T13:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.202176 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.202234 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.202250 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.202270 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.202285 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:40Z","lastTransitionTime":"2025-11-24T13:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.305440 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.305548 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.305567 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.305590 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.305608 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:40Z","lastTransitionTime":"2025-11-24T13:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.407612 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.407863 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.407945 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.408032 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.408094 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:40Z","lastTransitionTime":"2025-11-24T13:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.510700 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.510775 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.510793 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.510822 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.510846 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:40Z","lastTransitionTime":"2025-11-24T13:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.517133 5039 generic.go:334] "Generic (PLEG): container finished" podID="2c35ff00-6898-4235-af87-d46e63a20111" containerID="cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967" exitCode=0 Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.517191 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" event={"ID":"2c35ff00-6898-4235-af87-d46e63a20111","Type":"ContainerDied","Data":"cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967"} Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.538157 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:40Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.563195 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:40Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.576924 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:40Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.595811 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:40Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.606763 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\
" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:40Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.612937 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.612982 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.612999 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.613023 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.613042 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:40Z","lastTransitionTime":"2025-11-24T13:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.618616 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secret
s/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:40Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.635867 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:40Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.663683 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:40Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.680655 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:40Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.697927 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:40Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.715541 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.715585 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.715597 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.715614 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.715624 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:40Z","lastTransitionTime":"2025-11-24T13:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.718492 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:40Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.728537 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-oper
ator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:40Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.744013 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:40Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.754433 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:40Z is after 2025-08-24T17:21:41Z"
Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.818268 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.818325 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.818334 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.818348 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.818358 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:40Z","lastTransitionTime":"2025-11-24T13:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.920657 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.920696 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.920709 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.920725 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:40 crc kubenswrapper[5039]: I1124 13:18:40.920773 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:40Z","lastTransitionTime":"2025-11-24T13:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.023428 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.023476 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.023488 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.023526 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.023541 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:41Z","lastTransitionTime":"2025-11-24T13:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.126461 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.126536 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.126588 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.126609 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.126626 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:41Z","lastTransitionTime":"2025-11-24T13:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.229768 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.229821 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.229839 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.229862 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.229878 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:41Z","lastTransitionTime":"2025-11-24T13:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.306065 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 13:18:41 crc kubenswrapper[5039]: E1124 13:18:41.306234 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.306686 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.306721 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 13:18:41 crc kubenswrapper[5039]: E1124 13:18:41.306822 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 13:18:41 crc kubenswrapper[5039]: E1124 13:18:41.306975 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.333161 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.333228 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.333252 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.333281 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.333306 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:41Z","lastTransitionTime":"2025-11-24T13:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.436497 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.436600 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.436634 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.436664 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.436685 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:41Z","lastTransitionTime":"2025-11-24T13:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.524949 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" event={"ID":"2c35ff00-6898-4235-af87-d46e63a20111","Type":"ContainerStarted","Data":"e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485"}
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.531224 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerStarted","Data":"35a4df3f22e7ea0065fc90d25afbe28e5b3a398d099b4c6176d03daa61dfb931"}
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.531606 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.531785 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.540434 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.541660 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.541690 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.541719 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.541741 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:41Z","lastTransitionTime":"2025-11-24T13:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.547472 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.563230 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:41 crc 
kubenswrapper[5039]: I1124 13:18:41.563934 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.564403 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.577717 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.589662 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.601317 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.622756 5039 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.641151 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.644752 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.644787 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.644801 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.644819 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.644831 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:41Z","lastTransitionTime":"2025-11-24T13:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.657326 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mount
Path\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\
\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.668571 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.679894 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\
\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.693380 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.708996 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.712209 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.721858 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.735748 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.748223 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.748460 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.748590 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.748681 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.748788 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:41Z","lastTransitionTime":"2025-11-24T13:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.749490 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.760818 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.772733 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.781319 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.790747 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.806559 5039 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35a4df3f22e7ea0065fc90d25afbe28e5b3a398d099b4c6176d03daa61dfb931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount
\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.817680 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.833956 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.844291 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.851161 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.851357 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.851545 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.851686 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.851895 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:41Z","lastTransitionTime":"2025-11-24T13:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.855402 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"host
IP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.867140 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578
bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.878997 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.889492 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.899785 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:41Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.953830 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.954067 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.954133 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.954226 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:41 crc kubenswrapper[5039]: I1124 13:18:41.954299 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:41Z","lastTransitionTime":"2025-11-24T13:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.056628 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.056679 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.056697 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.056719 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.056736 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:42Z","lastTransitionTime":"2025-11-24T13:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.163097 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.163150 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.163299 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.163362 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.163378 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:42Z","lastTransitionTime":"2025-11-24T13:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.266781 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.266836 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.266849 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.266867 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.266881 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:42Z","lastTransitionTime":"2025-11-24T13:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.369566 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.369648 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.369679 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.369711 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.369735 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:42Z","lastTransitionTime":"2025-11-24T13:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.472695 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.472744 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.472761 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.472785 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.472801 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:42Z","lastTransitionTime":"2025-11-24T13:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.534730 5039 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.575576 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.575650 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.575678 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.575707 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.575728 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:42Z","lastTransitionTime":"2025-11-24T13:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.678094 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.678144 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.678161 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.678184 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.678200 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:42Z","lastTransitionTime":"2025-11-24T13:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.781498 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.781594 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.781619 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.781656 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.781681 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:42Z","lastTransitionTime":"2025-11-24T13:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.884955 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.885007 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.885022 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.885043 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.885059 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:42Z","lastTransitionTime":"2025-11-24T13:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.988217 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.988262 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.988277 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.988299 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:42 crc kubenswrapper[5039]: I1124 13:18:42.988311 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:42Z","lastTransitionTime":"2025-11-24T13:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.092134 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.092190 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.092207 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.092231 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.092249 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:43Z","lastTransitionTime":"2025-11-24T13:18:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.194232 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.194283 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.194296 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.194315 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.194327 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:43Z","lastTransitionTime":"2025-11-24T13:18:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.297026 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.297068 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.297081 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.297101 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.297114 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:43Z","lastTransitionTime":"2025-11-24T13:18:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.306449 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:43 crc kubenswrapper[5039]: E1124 13:18:43.306605 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.306450 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.306728 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:18:43 crc kubenswrapper[5039]: E1124 13:18:43.306907 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:18:43 crc kubenswrapper[5039]: E1124 13:18:43.306774 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.399669 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.399746 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.399770 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.399800 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.399832 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:43Z","lastTransitionTime":"2025-11-24T13:18:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.503205 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.503293 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.503317 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.503355 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.503379 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:43Z","lastTransitionTime":"2025-11-24T13:18:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.538099 5039 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.606899 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.606960 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.606976 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.607000 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.607018 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:43Z","lastTransitionTime":"2025-11-24T13:18:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.709617 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.709669 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.709692 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.709724 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.709749 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:43Z","lastTransitionTime":"2025-11-24T13:18:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.812869 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.812934 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.812957 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.812986 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.813008 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:43Z","lastTransitionTime":"2025-11-24T13:18:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.915971 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.916032 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.916049 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.916075 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:43 crc kubenswrapper[5039]: I1124 13:18:43.916142 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:43Z","lastTransitionTime":"2025-11-24T13:18:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.019106 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.019169 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.019192 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.019219 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.019241 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:44Z","lastTransitionTime":"2025-11-24T13:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.123110 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.123238 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.123258 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.123287 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.123307 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:44Z","lastTransitionTime":"2025-11-24T13:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.226890 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.226953 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.226972 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.226998 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.227018 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:44Z","lastTransitionTime":"2025-11-24T13:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.329828 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.329885 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.329903 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.329927 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.329945 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:44Z","lastTransitionTime":"2025-11-24T13:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.434985 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.435061 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.435149 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.435179 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.435208 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:44Z","lastTransitionTime":"2025-11-24T13:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.538560 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.538624 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.538648 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.538677 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.538700 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:44Z","lastTransitionTime":"2025-11-24T13:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.543588 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w2ctb_54c05b03-6747-47bf-a40d-8a9332c4d856/ovnkube-controller/0.log" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.546868 5039 generic.go:334] "Generic (PLEG): container finished" podID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerID="35a4df3f22e7ea0065fc90d25afbe28e5b3a398d099b4c6176d03daa61dfb931" exitCode=1 Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.546922 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerDied","Data":"35a4df3f22e7ea0065fc90d25afbe28e5b3a398d099b4c6176d03daa61dfb931"} Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.548072 5039 scope.go:117] "RemoveContainer" containerID="35a4df3f22e7ea0065fc90d25afbe28e5b3a398d099b4c6176d03daa61dfb931" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.571026 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:44Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.597280 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688
df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\
\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:44Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.617861 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:44Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.636468 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:44Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.642172 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.642211 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.642223 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.642240 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.642252 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:44Z","lastTransitionTime":"2025-11-24T13:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.652835 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:44Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.670221 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-oper
ator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:44Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.685870 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:44Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.700430 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:44Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.716597 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:44Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.732505 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:44Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.744647 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.744689 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.744704 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.744727 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.744743 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:44Z","lastTransitionTime":"2025-11-24T13:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.746850 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"o
vnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:44Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.765936 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://35a4df3f22e7ea0065fc90d25afbe28e5b3a398d
099b4c6176d03daa61dfb931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35a4df3f22e7ea0065fc90d25afbe28e5b3a398d099b4c6176d03daa61dfb931\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:18:43Z\\\",\\\"message\\\":\\\"9235 6361 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.839437 6361 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 13:18:43.839583 6361 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.839723 6361 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.839896 6361 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 13:18:43.840043 6361 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 13:18:43.840095 6361 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.840621 6361 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 13:18:43.840693 6361 factory.go:656] Stopping watch factory\\\\nI1124 13:18:43.840718 6361 ovnkube.go:599] Stopped ovnkube\\\\nI1124 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:44Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.778795 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:44Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.789857 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\"
:\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:44Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.847104 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.847149 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.847161 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.847178 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.847189 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:44Z","lastTransitionTime":"2025-11-24T13:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.946924 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:18:44 crc kubenswrapper[5039]: E1124 13:18:44.947182 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:19:00.947156772 +0000 UTC m=+53.386281312 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.949519 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.949547 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.949557 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.949577 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:44 crc kubenswrapper[5039]: I1124 13:18:44.949594 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:44Z","lastTransitionTime":"2025-11-24T13:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.048259 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.048315 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.048343 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.048376 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.048485 5039 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object 
"openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.048555 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 13:19:01.04853997 +0000 UTC m=+53.487664470 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.048895 5039 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.048937 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 13:19:01.04892635 +0000 UTC m=+53.488050860 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.049001 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.049019 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.049031 5039 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.049058 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 13:19:01.049049903 +0000 UTC m=+53.488174413 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.049111 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.049124 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.049135 5039 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.049161 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 13:19:01.049152937 +0000 UTC m=+53.488277437 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.052359 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.052393 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.052402 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.052417 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.052428 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:45Z","lastTransitionTime":"2025-11-24T13:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.154483 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.154544 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.154556 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.154573 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.154584 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:45Z","lastTransitionTime":"2025-11-24T13:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.256871 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.256926 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.256945 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.256969 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.257023 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:45Z","lastTransitionTime":"2025-11-24T13:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.305757 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.305807 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.305761 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.305937 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.306014 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.306112 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.359385 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.359452 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.359474 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.359498 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.359546 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:45Z","lastTransitionTime":"2025-11-24T13:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.450283 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.450314 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.450323 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.450339 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.450348 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:45Z","lastTransitionTime":"2025-11-24T13:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.469284 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:45Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.473246 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.473293 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.473306 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.473323 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.473335 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:45Z","lastTransitionTime":"2025-11-24T13:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.492296 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:45Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.496066 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.496106 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
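The retry loop above keeps failing on the same TLS handshake: the serving certificate behind https://127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, roughly three months before the node's clock reading of 2025-11-24T13:18:45Z. Below is a minimal sketch of the x509 validity-window comparison that yields this message shape, using only times quoted in the log; the notBefore bound is a hypothetical placeholder, since the error reports only the notAfter bound.

# Sketch of the x509 validity-window check behind the error above. The
# notAfter bound and "current time" are copied from the log message; the
# notBefore bound is a hypothetical placeholder, since the log omits it.
from datetime import datetime, timezone

ISO = "%Y-%m-%dT%H:%M:%SZ"
not_before = datetime(2024, 8, 24, 17, 21, 41, tzinfo=timezone.utc)  # hypothetical
not_after = datetime(2025, 8, 24, 17, 21, 41, tzinfo=timezone.utc)   # from the log
now = datetime(2025, 11, 24, 13, 18, 45, tzinfo=timezone.utc)        # from the log

if now < not_before:
    print(f"x509: certificate is not yet valid: current time {now.strftime(ISO)} is before {not_before.strftime(ISO)}")
elif now > not_after:
    print(f"x509: certificate has expired: current time {now.strftime(ISO)} is after {not_after.strftime(ISO)}")
else:
    print("certificate is within its validity window")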
event="NodeHasNoDiskPressure" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.496115 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.496136 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.496147 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:45Z","lastTransitionTime":"2025-11-24T13:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.513088 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:45Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.517366 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.517396 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
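Underlying the repeated Ready=False condition is the CNI check: the runtime reports NetworkReady=false because /etc/kubernetes/cni/net.d/ holds no network configuration. Below is a rough sketch of that readiness test; the directory path comes from the log, while the accepted extensions (.conf, .conflist, .json) are an assumption based on common CNI config loaders, not read from this cluster's runtime.

# Sketch of the "is any CNI network configured?" test behind the
# NetworkReady=false condition above. Directory path from the log; the
# extension set is an assumed convention of common CNI config loaders.
import sys
from pathlib import Path

CNI_CONF_DIR = Path("/etc/kubernetes/cni/net.d")
CNI_EXTENSIONS = {".conf", ".conflist", ".json"}

configs = []
if CNI_CONF_DIR.is_dir():
    configs = sorted(p for p in CNI_CONF_DIR.iterdir() if p.suffix in CNI_EXTENSIONS)

if not configs:
    print(f"no CNI configuration file in {CNI_CONF_DIR}/. Has your network provider started?")
    sys.exit(1)
for path in configs:
    print(f"found CNI config: {path}")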
event="NodeHasNoDiskPressure" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.517406 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.517422 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.517435 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:45Z","lastTransitionTime":"2025-11-24T13:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.528221 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:45Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.532081 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.532129 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
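The status updates in this stretch are attempted a fixed number of times before the kubelet gives up: this capture shows five E-level "will retry" entries followed by a terminal "update node status exceeds retry count". Below is a hedged sketch of that bounded-retry control flow; the attempt budget of 5 is inferred from this log rather than taken from kubelet source, and patch_node_status is a hypothetical stand-in for the real PATCH request.

# Sketch of the bounded retry visible in this log: a fixed attempt budget,
# one "will retry" line per failure, then a terminal "exceeds retry count".
NODE_STATUS_UPDATE_RETRY = 5  # inferred from the five E-level entries in this capture

class WebhookCallFailed(Exception):
    pass

def patch_node_status(node: str) -> None:
    # Hypothetical stand-in for PATCH /api/v1/nodes/{node}/status; here it
    # always fails the way the log shows, via the webhook's expired certificate.
    raise WebhookCallFailed(
        'failed calling webhook "node.network-node-identity.openshift.io": '
        "tls: failed to verify certificate: x509: certificate has expired or is not yet valid"
    )

def try_update_node_status(node: str) -> None:
    for _ in range(NODE_STATUS_UPDATE_RETRY):
        try:
            patch_node_status(node)
            return
        except WebhookCallFailed as err:
            print(f'"Error updating node status, will retry" err="{err}"')
    print('"Unable to update node status" err="update node status exceeds retry count"')

try_update_node_status("crc")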
event="NodeHasNoDiskPressure" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.532143 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.532161 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.532173 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:45Z","lastTransitionTime":"2025-11-24T13:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.546955 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:45Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.547120 5039 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.548966 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.549006 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.549017 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.549034 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.549047 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:45Z","lastTransitionTime":"2025-11-24T13:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.551323 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w2ctb_54c05b03-6747-47bf-a40d-8a9332c4d856/ovnkube-controller/1.log" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.551909 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w2ctb_54c05b03-6747-47bf-a40d-8a9332c4d856/ovnkube-controller/0.log" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.554623 5039 generic.go:334] "Generic (PLEG): container finished" podID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerID="a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c997d8e8821583a83195cb043" exitCode=1 Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.554661 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerDied","Data":"a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c997d8e8821583a83195cb043"} Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.554706 5039 scope.go:117] "RemoveContainer" containerID="35a4df3f22e7ea0065fc90d25afbe28e5b3a398d099b4c6176d03daa61dfb931" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.555772 5039 scope.go:117] "RemoveContainer" containerID="a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c997d8e8821583a83195cb043" Nov 24 13:18:45 crc kubenswrapper[5039]: E1124 13:18:45.556020 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-w2ctb_openshift-ovn-kubernetes(54c05b03-6747-47bf-a40d-8a9332c4d856)\"" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.567470 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:45Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.580085 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:45Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.592492 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:45Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.606180 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:45Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.621889 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c
987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:45Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.640834 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:45Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.651729 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.651797 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.651820 5039 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.651852 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.651874 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:45Z","lastTransitionTime":"2025-11-24T13:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.659237 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:45Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.678224 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:45Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.695050 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:45Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.735716 5039 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c997d8e8821583a83195cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35a4df3f22e7ea0065fc90d25afbe28e5b3a398d099b4c6176d03daa61dfb931\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:18:43Z\\\",\\\"message\\\":\\\"9235 6361 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.839437 6361 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 13:18:43.839583 6361 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.839723 6361 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.839896 6361 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 13:18:43.840043 6361 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 13:18:43.840095 6361 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.840621 6361 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 13:18:43.840693 6361 factory.go:656] Stopping watch factory\\\\nI1124 13:18:43.840718 6361 ovnkube.go:599] Stopped ovnkube\\\\nI1124 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c997d8e8821583a83195cb043\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"Map:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none 
reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:169.254.0.2:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {4de02fb8-85f8-4208-9384-785ba5457d16}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 13:18:45.437891 6505 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1124 13:18:45.437958 6505 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1124 13:18:45.437988 6505 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:18:45.438015 6505 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:18:45.438073 6505 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o:/
/7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:45Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.750765 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:45Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.754286 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.754646 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.754767 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.754907 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.755032 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:45Z","lastTransitionTime":"2025-11-24T13:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.762461 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:45Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.780451 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:45Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.799882 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:45Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.857284 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.857401 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:45 crc 
kubenswrapper[5039]: I1124 13:18:45.857470    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.857581    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.857658    5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:45Z","lastTransitionTime":"2025-11-24T13:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.961119    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.961160    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.961169    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.961186    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:45 crc kubenswrapper[5039]: I1124 13:18:45.961196    5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:45Z","lastTransitionTime":"2025-11-24T13:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.064399    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.064461    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.064479    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.064564    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.064583    5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:46Z","lastTransitionTime":"2025-11-24T13:18:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.166958    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.167036    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.167063    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.167096    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.167121    5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:46Z","lastTransitionTime":"2025-11-24T13:18:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.270275    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.270331    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.270345    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.270367    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.270382    5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:46Z","lastTransitionTime":"2025-11-24T13:18:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.373936    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.374043    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.374063    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.374124    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.374144    5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:46Z","lastTransitionTime":"2025-11-24T13:18:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.477350    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.477404    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.477424    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.477452    5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.477479    5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:46Z","lastTransitionTime":"2025-11-24T13:18:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.533410    5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc"]
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.534213    5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.536059    5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.537320    5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.557268    5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:46Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.561714 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w2ctb_54c05b03-6747-47bf-a40d-8a9332c4d856/ovnkube-controller/1.log" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.580831 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.580902 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.580927 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.580960 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.580980 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:46Z","lastTransitionTime":"2025-11-24T13:18:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.580771 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:46Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.596139 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:46Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.615421 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:46Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.634891 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:46Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.650165 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-v5nbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:46Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.665141 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a2c72781-6c62-4ca8-abee-e9e692ab4a3e-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-v5nbc\" (UID: \"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.665208 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a2c72781-6c62-4ca8-abee-e9e692ab4a3e-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-v5nbc\" (UID: \"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.665241 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lfmk\" (UniqueName: \"kubernetes.io/projected/a2c72781-6c62-4ca8-abee-e9e692ab4a3e-kube-api-access-2lfmk\") pod \"ovnkube-control-plane-749d76644c-v5nbc\" (UID: 
\"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.665283 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a2c72781-6c62-4ca8-abee-e9e692ab4a3e-env-overrides\") pod \"ovnkube-control-plane-749d76644c-v5nbc\" (UID: \"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.665365 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:46Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.683618 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.683677 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.683692 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.683709 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.683722 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:46Z","lastTransitionTime":"2025-11-24T13:18:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.684936 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:46Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.701098 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:46Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.716167 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:46Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.728923 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:46Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.754223 5039 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c997d8e8821583a83195cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35a4df3f22e7ea0065fc90d25afbe28e5b3a398d099b4c6176d03daa61dfb931\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:18:43Z\\\",\\\"message\\\":\\\"9235 6361 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.839437 6361 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 13:18:43.839583 6361 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.839723 6361 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.839896 6361 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 13:18:43.840043 6361 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 13:18:43.840095 6361 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.840621 6361 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 13:18:43.840693 6361 factory.go:656] Stopping watch factory\\\\nI1124 13:18:43.840718 6361 ovnkube.go:599] Stopped ovnkube\\\\nI1124 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c997d8e8821583a83195cb043\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"Map:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none 
reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:169.254.0.2:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {4de02fb8-85f8-4208-9384-785ba5457d16}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 13:18:45.437891 6505 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1124 13:18:45.437958 6505 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1124 13:18:45.437988 6505 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:18:45.438015 6505 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:18:45.438073 6505 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o:/
/7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:46Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.765847 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a2c72781-6c62-4ca8-abee-e9e692ab4a3e-env-overrides\") pod \"ovnkube-control-plane-749d76644c-v5nbc\" (UID: \"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.765926 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a2c72781-6c62-4ca8-abee-e9e692ab4a3e-ovnkube-config\") pod 
\"ovnkube-control-plane-749d76644c-v5nbc\" (UID: \"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.765951 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lfmk\" (UniqueName: \"kubernetes.io/projected/a2c72781-6c62-4ca8-abee-e9e692ab4a3e-kube-api-access-2lfmk\") pod \"ovnkube-control-plane-749d76644c-v5nbc\" (UID: \"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.765974 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a2c72781-6c62-4ca8-abee-e9e692ab4a3e-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-v5nbc\" (UID: \"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.767123 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a2c72781-6c62-4ca8-abee-e9e692ab4a3e-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-v5nbc\" (UID: \"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.767584 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a2c72781-6c62-4ca8-abee-e9e692ab4a3e-env-overrides\") pod \"ovnkube-control-plane-749d76644c-v5nbc\" (UID: \"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.769389 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:46Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.772843 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a2c72781-6c62-4ca8-abee-e9e692ab4a3e-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-v5nbc\" (UID: \"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.786398 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.786440 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.786457 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.786477 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.786493 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:46Z","lastTransitionTime":"2025-11-24T13:18:46Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.793098 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\"
:true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\
"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:46Z is after 
2025-08-24T17:21:41Z" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.795238 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2lfmk\" (UniqueName: \"kubernetes.io/projected/a2c72781-6c62-4ca8-abee-e9e692ab4a3e-kube-api-access-2lfmk\") pod \"ovnkube-control-plane-749d76644c-v5nbc\" (UID: \"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.806620 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:46Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.855915 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" Nov 24 13:18:46 crc kubenswrapper[5039]: W1124 13:18:46.869838 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2c72781_6c62_4ca8_abee_e9e692ab4a3e.slice/crio-7156bc212f82e923edd341dd79b6d75a00a0d8b301ab23eaa61f43285c275e31 WatchSource:0}: Error finding container 7156bc212f82e923edd341dd79b6d75a00a0d8b301ab23eaa61f43285c275e31: Status 404 returned error can't find the container with id 7156bc212f82e923edd341dd79b6d75a00a0d8b301ab23eaa61f43285c275e31 Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.889257 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.889305 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.889323 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.889346 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.889362 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:46Z","lastTransitionTime":"2025-11-24T13:18:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.991605 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.991650 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.991666 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.991686 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:46 crc kubenswrapper[5039]: I1124 13:18:46.991702 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:46Z","lastTransitionTime":"2025-11-24T13:18:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.094225 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.094262 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.094276 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.094296 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.094311 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:47Z","lastTransitionTime":"2025-11-24T13:18:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.196112 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.196160 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.196182 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.196203 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.196219 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:47Z","lastTransitionTime":"2025-11-24T13:18:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.299054 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.299120 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.299144 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.299172 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.299191 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:47Z","lastTransitionTime":"2025-11-24T13:18:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.306329 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.306360 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.306420 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:18:47 crc kubenswrapper[5039]: E1124 13:18:47.306472 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:18:47 crc kubenswrapper[5039]: E1124 13:18:47.306624 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:18:47 crc kubenswrapper[5039]: E1124 13:18:47.306748 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.401850 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.401888 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.401897 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.401912 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.401921 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:47Z","lastTransitionTime":"2025-11-24T13:18:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.504364 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.504415 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.504424 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.504441 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.504456 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:47Z","lastTransitionTime":"2025-11-24T13:18:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.572242 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" event={"ID":"a2c72781-6c62-4ca8-abee-e9e692ab4a3e","Type":"ContainerStarted","Data":"3c8f2f7f29ea14926868534547c27d44a5ad0bb742fd042f42849590e34a54e8"} Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.572913 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" event={"ID":"a2c72781-6c62-4ca8-abee-e9e692ab4a3e","Type":"ContainerStarted","Data":"bb6a0812c1e533294fbc653814a92d6672c2d3479d5b28c2c2d5dafae604916c"} Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.572965 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" event={"ID":"a2c72781-6c62-4ca8-abee-e9e692ab4a3e","Type":"ContainerStarted","Data":"7156bc212f82e923edd341dd79b6d75a00a0d8b301ab23eaa61f43285c275e31"} Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.587579 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:47Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.603605 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:47Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.607622 5039 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.607855 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.607882 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.607909 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.607922 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:47Z","lastTransitionTime":"2025-11-24T13:18:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.636457 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c
997d8e8821583a83195cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35a4df3f22e7ea0065fc90d25afbe28e5b3a398d099b4c6176d03daa61dfb931\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:18:43Z\\\",\\\"message\\\":\\\"9235 6361 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.839437 6361 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 13:18:43.839583 6361 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.839723 6361 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.839896 6361 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 13:18:43.840043 6361 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 13:18:43.840095 6361 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.840621 6361 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 13:18:43.840693 6361 factory.go:656] Stopping watch factory\\\\nI1124 13:18:43.840718 6361 ovnkube.go:599] Stopped ovnkube\\\\nI1124 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c997d8e8821583a83195cb043\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"Map:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:169.254.0.2:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {4de02fb8-85f8-4208-9384-785ba5457d16}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 13:18:45.437891 6505 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1124 13:18:45.437958 6505 
controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1124 13:18:45.437988 6505 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:18:45.438015 6505 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:18:45.438073 6505 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.1
26.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:47Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.655142 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:47Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.711571 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.711625 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.711644 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.711668 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.711687 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:47Z","lastTransitionTime":"2025-11-24T13:18:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.727753 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:47Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.743131 5039 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:47Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.756050 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:47Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.776208 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c
987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:47Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.791652 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:47Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.804161 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:47Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.814359 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.814415 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.814425 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.814448 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.814460 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:47Z","lastTransitionTime":"2025-11-24T13:18:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.815965 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:47Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.828229 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb6a0812c1e533294fbc653814a92d6672c2d3479d5b28c2c2d5dafae604916c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c8f2f7f29ea14926868534547c27d44a5ad0bb742fd042f42849590e34a54e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadO
nly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-v5nbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:47Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.841019 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveRead
Only\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:47Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.854112 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:47Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.869678 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:47Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.916521 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.916545 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.916555 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.916569 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:47 crc kubenswrapper[5039]: I1124 13:18:47.916580 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:47Z","lastTransitionTime":"2025-11-24T13:18:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.019354 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.019415 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.019436 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.019464 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.019486 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:48Z","lastTransitionTime":"2025-11-24T13:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.089024 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-vnpwt"] Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.090017 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:18:48 crc kubenswrapper[5039]: E1124 13:18:48.090144 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.122788 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.123645 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.123832 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.124027 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.124210 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:48Z","lastTransitionTime":"2025-11-24T13:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.124427 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c
997d8e8821583a83195cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35a4df3f22e7ea0065fc90d25afbe28e5b3a398d099b4c6176d03daa61dfb931\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:18:43Z\\\",\\\"message\\\":\\\"9235 6361 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.839437 6361 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 13:18:43.839583 6361 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.839723 6361 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.839896 6361 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 13:18:43.840043 6361 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 13:18:43.840095 6361 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.840621 6361 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 13:18:43.840693 6361 factory.go:656] Stopping watch factory\\\\nI1124 13:18:43.840718 6361 ovnkube.go:599] Stopped ovnkube\\\\nI1124 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c997d8e8821583a83195cb043\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"Map:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:169.254.0.2:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {4de02fb8-85f8-4208-9384-785ba5457d16}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 13:18:45.437891 6505 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1124 13:18:45.437958 6505 
controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1124 13:18:45.437988 6505 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:18:45.438015 6505 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:18:45.438073 6505 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.1
26.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.143250 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vnpwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5926107d-81bc-4e34-9e27-8018cbccf590\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vnpwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.162261 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.179929 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.195105 5039 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.211928 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.227946 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.227994 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:48 crc 
kubenswrapper[5039]: I1124 13:18:48.228011 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.228035 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.228051 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:48Z","lastTransitionTime":"2025-11-24T13:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.228748 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 
24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.247699 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.263045 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.280619 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb6a0812c1e533294fbc653814a92d6672c2d3479d5b28c2c2d5dafae604916c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c8f2f7f29ea14926868534547c27d44a5ad0bb742fd042f42849590e34a54e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-v5nbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.280946 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wprl\" (UniqueName: \"kubernetes.io/projected/5926107d-81bc-4e34-9e27-8018cbccf590-kube-api-access-4wprl\") pod \"network-metrics-daemon-vnpwt\" (UID: \"5926107d-81bc-4e34-9e27-8018cbccf590\") " pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.281133 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs\") pod \"network-metrics-daemon-vnpwt\" (UID: \"5926107d-81bc-4e34-9e27-8018cbccf590\") " pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.299865 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha25
6:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.321674 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.331075 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.331123 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.331140 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.331159 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.331174 5039 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:48Z","lastTransitionTime":"2025-11-24T13:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.339053 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.358582 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.375795 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.382261 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wprl\" (UniqueName: \"kubernetes.io/projected/5926107d-81bc-4e34-9e27-8018cbccf590-kube-api-access-4wprl\") pod \"network-metrics-daemon-vnpwt\" (UID: 
\"5926107d-81bc-4e34-9e27-8018cbccf590\") " pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.382368 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs\") pod \"network-metrics-daemon-vnpwt\" (UID: \"5926107d-81bc-4e34-9e27-8018cbccf590\") " pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:18:48 crc kubenswrapper[5039]: E1124 13:18:48.382563 5039 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 13:18:48 crc kubenswrapper[5039]: E1124 13:18:48.382689 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs podName:5926107d-81bc-4e34-9e27-8018cbccf590 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:48.882666376 +0000 UTC m=+41.321790916 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs") pod "network-metrics-daemon-vnpwt" (UID: "5926107d-81bc-4e34-9e27-8018cbccf590") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.395607 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.411461 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wprl\" (UniqueName: \"kubernetes.io/projected/5926107d-81bc-4e34-9e27-8018cbccf590-kube-api-access-4wprl\") pod \"network-metrics-daemon-vnpwt\" (UID: \"5926107d-81bc-4e34-9e27-8018cbccf590\") " pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.432745 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c
997d8e8821583a83195cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35a4df3f22e7ea0065fc90d25afbe28e5b3a398d099b4c6176d03daa61dfb931\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:18:43Z\\\",\\\"message\\\":\\\"9235 6361 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.839437 6361 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 13:18:43.839583 6361 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.839723 6361 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.839896 6361 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 13:18:43.840043 6361 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 13:18:43.840095 6361 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:18:43.840621 6361 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 13:18:43.840693 6361 factory.go:656] Stopping watch factory\\\\nI1124 13:18:43.840718 6361 ovnkube.go:599] Stopped ovnkube\\\\nI1124 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c997d8e8821583a83195cb043\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"Map:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:169.254.0.2:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {4de02fb8-85f8-4208-9384-785ba5457d16}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 13:18:45.437891 6505 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1124 13:18:45.437958 6505 
controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1124 13:18:45.437988 6505 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:18:45.438015 6505 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:18:45.438073 6505 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.1
26.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.435942 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.435987 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.436003 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.436025 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.436041 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:48Z","lastTransitionTime":"2025-11-24T13:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.448004 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vnpwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5926107d-81bc-4e34-9e27-8018cbccf590\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vnpwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.461026 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.478866 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.503314 5039 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.526270 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.538330 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.538368 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:48 crc 
kubenswrapper[5039]: I1124 13:18:48.538379 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.538394 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.538406 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:48Z","lastTransitionTime":"2025-11-24T13:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.539395 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 
24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.553621 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.571810 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.586882 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb6a0812c1e533294fbc653814a92d6672c2d3479d5b28c2c2d5dafae604916c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c8f2f7f29ea14926868534547c27d44a5ad0bb742fd042f42849590e34a54e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-v5nbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.606912 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manag
er-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.621294 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.633382 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.640930 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.640963 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.640973 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.640987 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.641004 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:48Z","lastTransitionTime":"2025-11-24T13:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.647219 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.662145 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.679295 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:48Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.743339 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.743377 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.743388 5039 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.743403 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.743415 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:48Z","lastTransitionTime":"2025-11-24T13:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.845976 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.846012 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.846022 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.846035 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.846044 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:48Z","lastTransitionTime":"2025-11-24T13:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.885712 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs\") pod \"network-metrics-daemon-vnpwt\" (UID: \"5926107d-81bc-4e34-9e27-8018cbccf590\") " pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:18:48 crc kubenswrapper[5039]: E1124 13:18:48.885912 5039 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 13:18:48 crc kubenswrapper[5039]: E1124 13:18:48.886032 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs podName:5926107d-81bc-4e34-9e27-8018cbccf590 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:49.886002963 +0000 UTC m=+42.325127503 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs") pod "network-metrics-daemon-vnpwt" (UID: "5926107d-81bc-4e34-9e27-8018cbccf590") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.949545 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.949611 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.949646 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.949684 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:48 crc kubenswrapper[5039]: I1124 13:18:48.949706 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:48Z","lastTransitionTime":"2025-11-24T13:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.052919 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.052979 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.052996 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.053022 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.053040 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:49Z","lastTransitionTime":"2025-11-24T13:18:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.156927 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.157002 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.157027 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.157058 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.157084 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:49Z","lastTransitionTime":"2025-11-24T13:18:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.259587 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.259667 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.259685 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.259759 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.259785 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:49Z","lastTransitionTime":"2025-11-24T13:18:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.305845 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.305911 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.305975 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:49 crc kubenswrapper[5039]: E1124 13:18:49.306125 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:18:49 crc kubenswrapper[5039]: E1124 13:18:49.306261 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:18:49 crc kubenswrapper[5039]: E1124 13:18:49.306450 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.363496 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.363608 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.363632 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.363656 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.363674 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:49Z","lastTransitionTime":"2025-11-24T13:18:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.466001 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.466071 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.466088 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.466113 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.466130 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:49Z","lastTransitionTime":"2025-11-24T13:18:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.569003 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.569068 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.569087 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.569110 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.569127 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:49Z","lastTransitionTime":"2025-11-24T13:18:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.672019 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.672090 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.672110 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.672132 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.672149 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:49Z","lastTransitionTime":"2025-11-24T13:18:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.775042 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.775180 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.775200 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.775221 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.775238 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:49Z","lastTransitionTime":"2025-11-24T13:18:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.878853 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.878928 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.878952 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.878982 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.879007 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:49Z","lastTransitionTime":"2025-11-24T13:18:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.895484 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs\") pod \"network-metrics-daemon-vnpwt\" (UID: \"5926107d-81bc-4e34-9e27-8018cbccf590\") " pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:18:49 crc kubenswrapper[5039]: E1124 13:18:49.895737 5039 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 13:18:49 crc kubenswrapper[5039]: E1124 13:18:49.895842 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs podName:5926107d-81bc-4e34-9e27-8018cbccf590 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:51.895816627 +0000 UTC m=+44.334941167 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs") pod "network-metrics-daemon-vnpwt" (UID: "5926107d-81bc-4e34-9e27-8018cbccf590") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.983158 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.983223 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.983256 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.983285 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:49 crc kubenswrapper[5039]: I1124 13:18:49.983306 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:49Z","lastTransitionTime":"2025-11-24T13:18:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.086156 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.086229 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.086251 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.086280 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.086302 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:50Z","lastTransitionTime":"2025-11-24T13:18:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.189908 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.189956 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.189974 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.189996 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.190013 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:50Z","lastTransitionTime":"2025-11-24T13:18:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.292326 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.292447 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.292563 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.292607 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.292630 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:50Z","lastTransitionTime":"2025-11-24T13:18:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.306749 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:18:50 crc kubenswrapper[5039]: E1124 13:18:50.306998 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.395867 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.395936 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.395959 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.395987 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.396011 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:50Z","lastTransitionTime":"2025-11-24T13:18:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.499082 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.499149 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.499166 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.499196 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.499218 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:50Z","lastTransitionTime":"2025-11-24T13:18:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.558070 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.559568 5039 scope.go:117] "RemoveContainer" containerID="a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c997d8e8821583a83195cb043" Nov 24 13:18:50 crc kubenswrapper[5039]: E1124 13:18:50.559980 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-w2ctb_openshift-ovn-kubernetes(54c05b03-6747-47bf-a40d-8a9332c4d856)\"" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.581154 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\
":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.602496 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.602586 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.602599 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.602617 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.602630 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:50Z","lastTransitionTime":"2025-11-24T13:18:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.603628 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\
\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.621047 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.640834 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.660437 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.678320 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb6a0812c1e533294fbc653814a92d6672c2d3479d5b28c2c2d5dafae604916c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c8f2f7f29ea14926868534547c27d44a5ad0bb742fd042f42849590e34a54e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-v5nbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.701069 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.705867 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.705926 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.705949 5039 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.705980 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.706004 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:50Z","lastTransitionTime":"2025-11-24T13:18:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.722296 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.741452 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.758664 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.777867 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.808660 5039 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.808718 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.808743 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.808774 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.808798 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:50Z","lastTransitionTime":"2025-11-24T13:18:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.811223 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c
997d8e8821583a83195cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c997d8e8821583a83195cb043\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"Map:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:169.254.0.2:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {4de02fb8-85f8-4208-9384-785ba5457d16}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 13:18:45.437891 6505 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1124 13:18:45.437958 6505 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1124 13:18:45.437988 6505 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:18:45.438015 6505 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:18:45.438073 6505 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-w2ctb_openshift-ovn-kubernetes(54c05b03-6747-47bf-a40d-8a9332c4d856)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.829185 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vnpwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5926107d-81bc-4e34-9e27-8018cbccf590\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vnpwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.849432 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.875690 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.894187 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.911782 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.911823 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.911836 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.911854 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:50 crc kubenswrapper[5039]: I1124 13:18:50.911866 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:50Z","lastTransitionTime":"2025-11-24T13:18:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.015010 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.015083 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.015095 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.015139 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.015151 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:51Z","lastTransitionTime":"2025-11-24T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.118633 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.118694 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.118712 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.118839 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.118859 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:51Z","lastTransitionTime":"2025-11-24T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.222320 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.222408 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.222457 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.222481 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.222498 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:51Z","lastTransitionTime":"2025-11-24T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.306177 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.306235 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.306264 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:18:51 crc kubenswrapper[5039]: E1124 13:18:51.306359 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:18:51 crc kubenswrapper[5039]: E1124 13:18:51.306551 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:18:51 crc kubenswrapper[5039]: E1124 13:18:51.306753 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.325640 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.325684 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.325696 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.325716 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.325729 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:51Z","lastTransitionTime":"2025-11-24T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.428594 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.428664 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.428692 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.428726 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.428750 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:51Z","lastTransitionTime":"2025-11-24T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.531726 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.531771 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.531792 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.531819 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.531835 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:51Z","lastTransitionTime":"2025-11-24T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.634440 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.634495 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.634537 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.634560 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.634576 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:51Z","lastTransitionTime":"2025-11-24T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.737057 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.737104 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.737118 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.737139 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.737151 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:51Z","lastTransitionTime":"2025-11-24T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.840992 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.841038 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.841049 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.841071 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.841083 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:51Z","lastTransitionTime":"2025-11-24T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.918811 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs\") pod \"network-metrics-daemon-vnpwt\" (UID: \"5926107d-81bc-4e34-9e27-8018cbccf590\") " pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:18:51 crc kubenswrapper[5039]: E1124 13:18:51.919036 5039 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 13:18:51 crc kubenswrapper[5039]: E1124 13:18:51.919196 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs podName:5926107d-81bc-4e34-9e27-8018cbccf590 nodeName:}" failed. No retries permitted until 2025-11-24 13:18:55.919156785 +0000 UTC m=+48.358281345 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs") pod "network-metrics-daemon-vnpwt" (UID: "5926107d-81bc-4e34-9e27-8018cbccf590") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.944421 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.944457 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.944469 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.944485 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:51 crc kubenswrapper[5039]: I1124 13:18:51.944496 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:51Z","lastTransitionTime":"2025-11-24T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.048166 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.048227 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.048253 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.048279 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.048298 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:52Z","lastTransitionTime":"2025-11-24T13:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.151171 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.151302 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.151322 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.151402 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.151433 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:52Z","lastTransitionTime":"2025-11-24T13:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.254781 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.254845 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.254863 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.254893 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.254912 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:52Z","lastTransitionTime":"2025-11-24T13:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.306324 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:18:52 crc kubenswrapper[5039]: E1124 13:18:52.306592 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.358048 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.358114 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.358129 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.358157 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.358173 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:52Z","lastTransitionTime":"2025-11-24T13:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.462164 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.462239 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.462263 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.462293 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.462315 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:52Z","lastTransitionTime":"2025-11-24T13:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.564588 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.564670 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.564696 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.564728 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.564752 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:52Z","lastTransitionTime":"2025-11-24T13:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.668113 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.668201 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.668227 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.668259 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.668278 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:52Z","lastTransitionTime":"2025-11-24T13:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.771993 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.772056 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.772074 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.772096 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.772113 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:52Z","lastTransitionTime":"2025-11-24T13:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.874384 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.874419 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.874431 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.874461 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.874471 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:52Z","lastTransitionTime":"2025-11-24T13:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.977666 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.977724 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.977741 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.977765 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:52 crc kubenswrapper[5039]: I1124 13:18:52.977787 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:52Z","lastTransitionTime":"2025-11-24T13:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.080812 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.080871 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.080887 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.080910 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.080928 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:53Z","lastTransitionTime":"2025-11-24T13:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.183897 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.183941 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.183952 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.183966 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.183976 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:53Z","lastTransitionTime":"2025-11-24T13:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.287966 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.288053 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.288063 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.288092 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.288112 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:53Z","lastTransitionTime":"2025-11-24T13:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.306763 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.306878 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.306794 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:53 crc kubenswrapper[5039]: E1124 13:18:53.307483 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:18:53 crc kubenswrapper[5039]: E1124 13:18:53.308679 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:18:53 crc kubenswrapper[5039]: E1124 13:18:53.311959 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.391458 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.391816 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.391909 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.391999 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.392083 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:53Z","lastTransitionTime":"2025-11-24T13:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.494724 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.494823 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.494848 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.494879 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.494900 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:53Z","lastTransitionTime":"2025-11-24T13:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.596970 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.597041 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.597060 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.597084 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.597168 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:53Z","lastTransitionTime":"2025-11-24T13:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.700304 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.700382 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.700406 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.700436 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.700457 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:53Z","lastTransitionTime":"2025-11-24T13:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.803569 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.803613 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.803624 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.803643 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.803655 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:53Z","lastTransitionTime":"2025-11-24T13:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.907247 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.907309 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.907320 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.907342 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:53 crc kubenswrapper[5039]: I1124 13:18:53.907357 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:53Z","lastTransitionTime":"2025-11-24T13:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.010490 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.010563 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.010574 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.010587 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.010597 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:54Z","lastTransitionTime":"2025-11-24T13:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.112474 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.112533 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.112544 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.112558 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.112568 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:54Z","lastTransitionTime":"2025-11-24T13:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.215155 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.215192 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.215204 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.215220 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.215231 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:54Z","lastTransitionTime":"2025-11-24T13:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.306558 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:18:54 crc kubenswrapper[5039]: E1124 13:18:54.306774 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.318155 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.318230 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.318251 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.318273 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.318289 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:54Z","lastTransitionTime":"2025-11-24T13:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.421268 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.421378 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.421397 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.421421 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.421439 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:54Z","lastTransitionTime":"2025-11-24T13:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.525076 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.525152 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.525177 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.525211 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.525234 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:54Z","lastTransitionTime":"2025-11-24T13:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.627289 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.627330 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.627341 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.627360 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.627371 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:54Z","lastTransitionTime":"2025-11-24T13:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.730616 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.730660 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.730672 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.730689 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.730698 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:54Z","lastTransitionTime":"2025-11-24T13:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.833972 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.834009 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.834017 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.834034 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.834043 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:54Z","lastTransitionTime":"2025-11-24T13:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.937611 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.937686 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.937711 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.937740 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:54 crc kubenswrapper[5039]: I1124 13:18:54.937762 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:54Z","lastTransitionTime":"2025-11-24T13:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.041267 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.041366 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.041385 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.041410 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.041427 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:55Z","lastTransitionTime":"2025-11-24T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.144420 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.144470 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.144481 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.144523 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.144535 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:55Z","lastTransitionTime":"2025-11-24T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.247913 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.247979 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.248005 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.248034 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.248056 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:55Z","lastTransitionTime":"2025-11-24T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.305910 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.305979 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 13:18:55 crc kubenswrapper[5039]: E1124 13:18:55.306095 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.305987 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 13:18:55 crc kubenswrapper[5039]: E1124 13:18:55.306285 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 13:18:55 crc kubenswrapper[5039]: E1124 13:18:55.306494 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.351023 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.351090 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.351115 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.351144 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.351170 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:55Z","lastTransitionTime":"2025-11-24T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.454316 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.454377 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.454399 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.454429 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.454450 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:55Z","lastTransitionTime":"2025-11-24T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.557755 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.558036 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.558049 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.558065 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.558077 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:55Z","lastTransitionTime":"2025-11-24T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.661045 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.661117 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.661142 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.661173 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.661196 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:55Z","lastTransitionTime":"2025-11-24T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.763884 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.763954 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.763978 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.764007 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.764026 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:55Z","lastTransitionTime":"2025-11-24T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.865676 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.865736 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.865758 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.865789 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.865811 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:55Z","lastTransitionTime":"2025-11-24T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:55 crc kubenswrapper[5039]: E1124 13:18:55.887142 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:55Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.892395 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.892453 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.892470 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.892494 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.892541 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:55Z","lastTransitionTime":"2025-11-24T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:55 crc kubenswrapper[5039]: E1124 13:18:55.913331 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:55Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.918657 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.918725 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.918747 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.918768 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.918784 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:55Z","lastTransitionTime":"2025-11-24T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:55 crc kubenswrapper[5039]: E1124 13:18:55.938959 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:55Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.944280 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.944341 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.944365 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.944392 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.944414 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:55Z","lastTransitionTime":"2025-11-24T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.959019 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs\") pod \"network-metrics-daemon-vnpwt\" (UID: \"5926107d-81bc-4e34-9e27-8018cbccf590\") " pod="openshift-multus/network-metrics-daemon-vnpwt"
Nov 24 13:18:55 crc kubenswrapper[5039]: E1124 13:18:55.959228 5039 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 24 13:18:55 crc kubenswrapper[5039]: E1124 13:18:55.959331 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs podName:5926107d-81bc-4e34-9e27-8018cbccf590 nodeName:}" failed. No retries permitted until 2025-11-24 13:19:03.95929976 +0000 UTC m=+56.398424300 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs") pod "network-metrics-daemon-vnpwt" (UID: "5926107d-81bc-4e34-9e27-8018cbccf590") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 13:18:55 crc kubenswrapper[5039]: E1124 13:18:55.967769 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:55Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.972902 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.972962 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.972985 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.973016 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.973038 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:55Z","lastTransitionTime":"2025-11-24T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:55 crc kubenswrapper[5039]: E1124 13:18:55.996406 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:55Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:55 crc kubenswrapper[5039]: E1124 13:18:55.996702 5039 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.999138 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.999207 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.999233 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.999265 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:55 crc kubenswrapper[5039]: I1124 13:18:55.999288 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:55Z","lastTransitionTime":"2025-11-24T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.101921 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.101973 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.102008 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.102038 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.102062 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:56Z","lastTransitionTime":"2025-11-24T13:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.205229 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.205306 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.205330 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.205358 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.205382 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:56Z","lastTransitionTime":"2025-11-24T13:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.306208 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:18:56 crc kubenswrapper[5039]: E1124 13:18:56.306708 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.308209 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.308280 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.308299 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.308328 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.308347 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:56Z","lastTransitionTime":"2025-11-24T13:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.411048 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.411427 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.411767 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.412123 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.412311 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:56Z","lastTransitionTime":"2025-11-24T13:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.516202 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.516256 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.516272 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.516298 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.516314 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:56Z","lastTransitionTime":"2025-11-24T13:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.619960 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.620381 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.620574 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.620750 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.620906 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:56Z","lastTransitionTime":"2025-11-24T13:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.724142 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.724209 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.724272 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.724300 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.724321 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:56Z","lastTransitionTime":"2025-11-24T13:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.827936 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.828007 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.828025 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.828051 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.828074 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:56Z","lastTransitionTime":"2025-11-24T13:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.931263 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.931324 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.931344 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.931370 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:56 crc kubenswrapper[5039]: I1124 13:18:56.931387 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:56Z","lastTransitionTime":"2025-11-24T13:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.034557 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.034594 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.034604 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.034621 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.034633 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:57Z","lastTransitionTime":"2025-11-24T13:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.141013 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.141092 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.141111 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.141137 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.141154 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:57Z","lastTransitionTime":"2025-11-24T13:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.244397 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.244485 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.244539 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.244571 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.244594 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:57Z","lastTransitionTime":"2025-11-24T13:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.306580 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.306639 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.306683 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:18:57 crc kubenswrapper[5039]: E1124 13:18:57.306781 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:18:57 crc kubenswrapper[5039]: E1124 13:18:57.306922 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:18:57 crc kubenswrapper[5039]: E1124 13:18:57.307138 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.353592 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.353655 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.353674 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.353700 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.353721 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:57Z","lastTransitionTime":"2025-11-24T13:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.456193 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.456251 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.456268 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.456290 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.456307 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:57Z","lastTransitionTime":"2025-11-24T13:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.558324 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.558374 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.558386 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.558404 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.558417 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:57Z","lastTransitionTime":"2025-11-24T13:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.661392 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.661465 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.661484 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.661549 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.661568 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:57Z","lastTransitionTime":"2025-11-24T13:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.765357 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.765411 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.765424 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.765441 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.765453 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:57Z","lastTransitionTime":"2025-11-24T13:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.868684 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.868736 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.868754 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.868775 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.868793 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:57Z","lastTransitionTime":"2025-11-24T13:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.972639 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.972714 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.972740 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.972770 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:57 crc kubenswrapper[5039]: I1124 13:18:57.972790 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:57Z","lastTransitionTime":"2025-11-24T13:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.075475 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.075532 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.075545 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.075560 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.075573 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:58Z","lastTransitionTime":"2025-11-24T13:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.178575 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.178608 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.178616 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.178629 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.178640 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:58Z","lastTransitionTime":"2025-11-24T13:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.281388 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.281427 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.281444 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.281460 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.281470 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:58Z","lastTransitionTime":"2025-11-24T13:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.306155 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:18:58 crc kubenswrapper[5039]: E1124 13:18:58.306256 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.323351 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.336395 5039 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.350598 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.361706 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.379410 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.383975 5039 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.384097 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.384124 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.384157 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.384182 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:58Z","lastTransitionTime":"2025-11-24T13:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.403862 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c
997d8e8821583a83195cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c997d8e8821583a83195cb043\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"Map:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:169.254.0.2:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {4de02fb8-85f8-4208-9384-785ba5457d16}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 13:18:45.437891 6505 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1124 13:18:45.437958 6505 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1124 13:18:45.437988 6505 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:18:45.438015 6505 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:18:45.438073 6505 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-w2ctb_openshift-ovn-kubernetes(54c05b03-6747-47bf-a40d-8a9332c4d856)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.414444 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vnpwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5926107d-81bc-4e34-9e27-8018cbccf590\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vnpwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.428549 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.444130 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.458564 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.477940 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.486580 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.486611 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.486622 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.486636 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.486648 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:58Z","lastTransitionTime":"2025-11-24T13:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.500055 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.517525 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.534022 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.551983 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.565665 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb6a0812c1e533294fbc653814a92d6672c2d3479d5b28c2c2d5dafae604916c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c8f2f7f29ea14926868534547c27d44a5ad0bb742fd042f42849590e34a54e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-v5nbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.589185 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.589267 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.589281 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.589312 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.589333 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:58Z","lastTransitionTime":"2025-11-24T13:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.697334 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.697371 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.697382 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.697423 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.697435 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:58Z","lastTransitionTime":"2025-11-24T13:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.800293 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.800348 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.800367 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.800390 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.800408 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:58Z","lastTransitionTime":"2025-11-24T13:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.903570 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.903642 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.903666 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.903696 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:58 crc kubenswrapper[5039]: I1124 13:18:58.903719 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:58Z","lastTransitionTime":"2025-11-24T13:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.006399 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.006585 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.006618 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.006654 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.006689 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:59Z","lastTransitionTime":"2025-11-24T13:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.109441 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.109542 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.109563 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.109586 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.109606 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:59Z","lastTransitionTime":"2025-11-24T13:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.213324 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.213391 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.213411 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.213436 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.213457 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:59Z","lastTransitionTime":"2025-11-24T13:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.306448 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.306554 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.306561 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:18:59 crc kubenswrapper[5039]: E1124 13:18:59.306710 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:18:59 crc kubenswrapper[5039]: E1124 13:18:59.307212 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:18:59 crc kubenswrapper[5039]: E1124 13:18:59.307369 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.317291 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.317359 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.317382 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.317413 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.317437 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:59Z","lastTransitionTime":"2025-11-24T13:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.419522 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.419567 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.419582 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.419602 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.419615 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:59Z","lastTransitionTime":"2025-11-24T13:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.522151 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.522190 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.522198 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.522215 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.522224 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:59Z","lastTransitionTime":"2025-11-24T13:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.625577 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.625691 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.625717 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.625742 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.625760 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:59Z","lastTransitionTime":"2025-11-24T13:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.728433 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.728750 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.728837 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.728924 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.729200 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:59Z","lastTransitionTime":"2025-11-24T13:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.831683 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.831716 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.831727 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.831743 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.831754 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:59Z","lastTransitionTime":"2025-11-24T13:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.935133 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.935209 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.935232 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.935261 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:18:59 crc kubenswrapper[5039]: I1124 13:18:59.935284 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:18:59Z","lastTransitionTime":"2025-11-24T13:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.037613 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.038107 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.038180 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.038254 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.038342 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:00Z","lastTransitionTime":"2025-11-24T13:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.141864 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.142203 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.142292 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.142381 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.142462 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:00Z","lastTransitionTime":"2025-11-24T13:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.244879 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.245131 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.245244 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.245328 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.245407 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:00Z","lastTransitionTime":"2025-11-24T13:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.332389 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:00 crc kubenswrapper[5039]: E1124 13:19:00.332576 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.347252 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.347301 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.347314 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.347337 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.347350 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:00Z","lastTransitionTime":"2025-11-24T13:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.450255 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.450300 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.450309 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.450325 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.450334 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:00Z","lastTransitionTime":"2025-11-24T13:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.554668 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.554726 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.554743 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.554765 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.554780 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:00Z","lastTransitionTime":"2025-11-24T13:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.658165 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.658465 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.658625 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.658734 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.658842 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:00Z","lastTransitionTime":"2025-11-24T13:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.761208 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.761238 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.761247 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.761259 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.761268 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:00Z","lastTransitionTime":"2025-11-24T13:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.864145 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.864626 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.864860 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.865061 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.865271 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:00Z","lastTransitionTime":"2025-11-24T13:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.968170 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.968230 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.968251 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.968276 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:00 crc kubenswrapper[5039]: I1124 13:19:00.968296 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:00Z","lastTransitionTime":"2025-11-24T13:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.038098 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:19:01 crc kubenswrapper[5039]: E1124 13:19:01.038273 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:19:33.038246754 +0000 UTC m=+85.477371254 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.070864 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.071091 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.071313 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.071536 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.071627 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:01Z","lastTransitionTime":"2025-11-24T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.139577 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:01 crc kubenswrapper[5039]: E1124 13:19:01.139806 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 13:19:01 crc kubenswrapper[5039]: E1124 13:19:01.139974 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 13:19:01 crc kubenswrapper[5039]: E1124 13:19:01.139995 5039 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:19:01 crc kubenswrapper[5039]: E1124 13:19:01.140067 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 13:19:33.140040183 +0000 UTC m=+85.579164713 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.139924 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.140216 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.140349 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:01 crc kubenswrapper[5039]: E1124 13:19:01.140415 5039 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 13:19:01 crc kubenswrapper[5039]: E1124 13:19:01.140584 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 13:19:33.140556176 +0000 UTC m=+85.579680706 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 13:19:01 crc kubenswrapper[5039]: E1124 13:19:01.140587 5039 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 13:19:01 crc kubenswrapper[5039]: E1124 13:19:01.140615 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 13:19:01 crc kubenswrapper[5039]: E1124 13:19:01.140774 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-11-24 13:19:33.14069409 +0000 UTC m=+85.579818630 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 13:19:01 crc kubenswrapper[5039]: E1124 13:19:01.140780 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 13:19:01 crc kubenswrapper[5039]: E1124 13:19:01.140812 5039 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:19:01 crc kubenswrapper[5039]: E1124 13:19:01.140887 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 13:19:33.140864274 +0000 UTC m=+85.579988884 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.175139 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.175189 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.175205 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.175228 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.175246 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:01Z","lastTransitionTime":"2025-11-24T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.278958 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.279046 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.279058 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.279075 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.279087 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:01Z","lastTransitionTime":"2025-11-24T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.306732 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.306742 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:01 crc kubenswrapper[5039]: E1124 13:19:01.306957 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.307465 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:01 crc kubenswrapper[5039]: E1124 13:19:01.307764 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:01 crc kubenswrapper[5039]: E1124 13:19:01.307834 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.308447 5039 scope.go:117] "RemoveContainer" containerID="a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c997d8e8821583a83195cb043" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.381974 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.382036 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.382054 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.382080 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.382133 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:01Z","lastTransitionTime":"2025-11-24T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.485796 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.485864 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.485885 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.485912 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.485930 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:01Z","lastTransitionTime":"2025-11-24T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.589867 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.589936 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.589962 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.589998 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.590023 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:01Z","lastTransitionTime":"2025-11-24T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.630472 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w2ctb_54c05b03-6747-47bf-a40d-8a9332c4d856/ovnkube-controller/1.log" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.634302 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerStarted","Data":"b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6"} Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.634947 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.669732 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3aa1f3339a623bd233fdffa35dd23116312e76b
88797b318fce6b5f0ead4bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c997d8e8821583a83195cb043\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"Map:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:169.254.0.2:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {4de02fb8-85f8-4208-9384-785ba5457d16}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 13:18:45.437891 6505 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1124 13:18:45.437958 6505 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1124 13:18:45.437988 6505 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:18:45.438015 6505 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:18:45.438073 6505 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.683936 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vnpwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5926107d-81bc-4e34-9e27-8018cbccf590\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vnpwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.692857 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.692907 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.692918 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.692937 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.692956 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:01Z","lastTransitionTime":"2025-11-24T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.699238 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.716105 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.733758 5039 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.753741 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.772422 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.788034 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.795089 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.795160 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.795171 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.795193 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.795204 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:01Z","lastTransitionTime":"2025-11-24T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.801733 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.813788 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb6a0812c1e533294fbc653814a92d6672c2d3479d5b28c2c2d5dafae604916c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c8f2f7f29ea14926868534547c27d44a5ad0bb742fd042f42849590e34a54e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadO
nly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-v5nbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.829888 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/op
enshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.843855 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.863272 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.877459 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.888326 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.897295 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.897328 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.897337 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.897352 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.897361 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:01Z","lastTransitionTime":"2025-11-24T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.900843 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"o
vnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.999856 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.999895 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.999905 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.999919 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:01 crc kubenswrapper[5039]: I1124 13:19:01.999928 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:01Z","lastTransitionTime":"2025-11-24T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.059340 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.068001 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.070935 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.081033 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.101901 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.101936 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.101950 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.101965 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.101976 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:02Z","lastTransitionTime":"2025-11-24T13:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.111284 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.135071 5039 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.159047 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.171590 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.182016 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb6a0812c1e533294fbc653814a92d6672c2d3479d5b28c2c2d5dafae604916c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c8f2f7f29ea14926868534547c27d44a5ad0bb742fd042f42849590e34a54e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-v5nbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 
13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.192702 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.204551 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.204593 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.204605 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.204625 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.204638 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:02Z","lastTransitionTime":"2025-11-24T13:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.206284 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.219572 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.234632 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.247311 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.262838 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.282758 5039 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c997d8e8821583a83195cb043\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"Map:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:169.254.0.2:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {4de02fb8-85f8-4208-9384-785ba5457d16}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 13:18:45.437891 6505 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1124 13:18:45.437958 6505 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1124 13:18:45.437988 6505 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:18:45.438015 6505 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:18:45.438073 6505 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.293864 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vnpwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5926107d-81bc-4e34-9e27-8018cbccf590\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vnpwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.304478 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.305675 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:02 crc kubenswrapper[5039]: E1124 13:19:02.305824 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.306847 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.306894 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.306908 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.306923 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.306935 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:02Z","lastTransitionTime":"2025-11-24T13:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.409799 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.409833 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.409841 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.409855 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.409863 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:02Z","lastTransitionTime":"2025-11-24T13:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.512326 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.512368 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.512381 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.512398 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.512408 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:02Z","lastTransitionTime":"2025-11-24T13:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.615213 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.615282 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.615294 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.615317 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.615328 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:02Z","lastTransitionTime":"2025-11-24T13:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.641723 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w2ctb_54c05b03-6747-47bf-a40d-8a9332c4d856/ovnkube-controller/2.log" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.643003 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w2ctb_54c05b03-6747-47bf-a40d-8a9332c4d856/ovnkube-controller/1.log" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.647691 5039 generic.go:334] "Generic (PLEG): container finished" podID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerID="b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6" exitCode=1 Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.647799 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerDied","Data":"b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6"} Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.647902 5039 scope.go:117] "RemoveContainer" containerID="a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c997d8e8821583a83195cb043" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.648597 5039 scope.go:117] "RemoveContainer" containerID="b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6" Nov 24 13:19:02 crc kubenswrapper[5039]: E1124 13:19:02.648945 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-w2ctb_openshift-ovn-kubernetes(54c05b03-6747-47bf-a40d-8a9332c4d856)\"" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.666094 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.679673 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.691711 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.706004 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.716756 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb6a0812c1e533294fbc653814a92d6672c2d3479d5b28c2c2d5dafae604916c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c8f2f7f29ea14926868534547c27d44a5ad0bb742fd042f42849590e34a54e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-v5nbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.717698 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.717731 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.717746 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.717779 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.717797 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:02Z","lastTransitionTime":"2025-11-24T13:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.729236 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.740539 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cea2230e-9024-455b-87aa-1b4c5b188723\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6328891f72adff70742b2aa64842672875abde57dfd275453ddbc585af80f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://62e7a017d9a3276e864342729bdc35453bb95e9e469760efb6ea283ffb618228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5616706b755bdff47ccadc09ae036e231c76e5953f8c5af9ea9cf8f8e449c59c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d494836fe5bc8e8ae\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d494836fe5bc8e8ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.752434 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.765706 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.779113 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.792800 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.806582 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.820314 5039 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.820363 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.820374 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.820391 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.820402 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:02Z","lastTransitionTime":"2025-11-24T13:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.829423 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3aa1f3339a623bd233fdffa35dd23116312e76b
88797b318fce6b5f0ead4bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7d3ce45c31063838c69ae1cd23ae8cd2f7b0d1c997d8e8821583a83195cb043\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:18:45Z\\\",\\\"message\\\":\\\"Map:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:169.254.0.2:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {4de02fb8-85f8-4208-9384-785ba5457d16}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 13:18:45.437891 6505 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1124 13:18:45.437958 6505 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1124 13:18:45.437988 6505 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:18:45.438015 6505 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:18:45.438073 6505 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"message\\\":\\\" handler 2 for removal\\\\nI1124 13:19:02.267268 6719 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 13:19:02.267314 6719 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 13:19:02.267322 6719 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1124 13:19:02.267351 6719 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 13:19:02.267373 6719 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 13:19:02.267384 6719 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 13:19:02.267393 6719 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 13:19:02.267424 6719 factory.go:656] Stopping watch factory\\\\nI1124 13:19:02.267448 6719 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1124 13:19:02.267458 6719 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 13:19:02.267467 6719 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 13:19:02.267475 6719 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1124 13:19:02.267492 6719 handler.go:208] 
Removed *v1.Node event handler 7\\\\nI1124 13:19:02.268628 6719 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:19:02.268688 6719 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:19:02.268771 6719 ovnkube.go:137] failed to run ovnkube: [failed to start network contr\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"host
IP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.841966 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vnpwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5926107d-81bc-4e34-9e27-8018cbccf590\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vnpwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.858467 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.870193 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.887397 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.923631 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.923685 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.923698 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.923717 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:02 crc kubenswrapper[5039]: I1124 13:19:02.923730 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:02Z","lastTransitionTime":"2025-11-24T13:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.025948 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.025979 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.025987 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.026002 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.026013 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:03Z","lastTransitionTime":"2025-11-24T13:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.128095 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.128141 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.128151 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.128170 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.128181 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:03Z","lastTransitionTime":"2025-11-24T13:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.231216 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.231262 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.231279 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.231308 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.231322 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:03Z","lastTransitionTime":"2025-11-24T13:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.306134 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.306251 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.306251 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:03 crc kubenswrapper[5039]: E1124 13:19:03.306438 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:03 crc kubenswrapper[5039]: E1124 13:19:03.306586 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:03 crc kubenswrapper[5039]: E1124 13:19:03.306721 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.334419 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.334499 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.334563 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.334597 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.334619 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:03Z","lastTransitionTime":"2025-11-24T13:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.437620 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.437704 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.437730 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.437760 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.437782 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:03Z","lastTransitionTime":"2025-11-24T13:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.540536 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.540627 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.540648 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.540668 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.540681 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:03Z","lastTransitionTime":"2025-11-24T13:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.643859 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.643966 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.644000 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.644034 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.644056 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:03Z","lastTransitionTime":"2025-11-24T13:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.653854 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w2ctb_54c05b03-6747-47bf-a40d-8a9332c4d856/ovnkube-controller/2.log" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.660127 5039 scope.go:117] "RemoveContainer" containerID="b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6" Nov 24 13:19:03 crc kubenswrapper[5039]: E1124 13:19:03.660373 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-w2ctb_openshift-ovn-kubernetes(54c05b03-6747-47bf-a40d-8a9332c4d856)\"" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.681621 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":
\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.703006 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.725126 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.743130 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.753799 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.753856 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.753874 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.753898 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.753915 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:03Z","lastTransitionTime":"2025-11-24T13:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.763175 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.775391 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb6a0812c1e533294fbc653814a92d6672c2d3479d5b28c2c2d5dafae604916c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c8f2f7f29ea14926868534547c27d44a5ad0bb742fd042f42849590e34a54e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadO
nly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-v5nbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.791415 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cea2230e-9024-455b-87aa-1b4c5b188723\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6328891f72adff70742b2aa64842672875abde57dfd275453ddbc585af80f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://62e7a017d9a3276e864342729bdc35453bb95e9e469760efb6ea283ffb618228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5616706b755bdff47ccadc09ae036e231c76e5953f8c5af9ea9cf8f8e449c59c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp
-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d494836fe5bc8e8ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d494836fe5bc8e8ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.805260 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.820982 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.833927 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.844619 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.856791 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.856932 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.856949 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.856965 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.856976 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:03Z","lastTransitionTime":"2025-11-24T13:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.858333 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.877017 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/va
r/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061
730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"message\\\":\\\" handler 2 for removal\\\\nI1124 13:19:02.267268 6719 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 13:19:02.267314 6719 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 13:19:02.267322 6719 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1124 13:19:02.267351 6719 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 13:19:02.267373 6719 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 13:19:02.267384 6719 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 13:19:02.267393 6719 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 13:19:02.267424 6719 factory.go:656] Stopping watch factory\\\\nI1124 13:19:02.267448 6719 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1124 13:19:02.267458 6719 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 13:19:02.267467 6719 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 13:19:02.267475 6719 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1124 13:19:02.267492 6719 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 13:19:02.268628 6719 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:19:02.268688 6719 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:19:02.268771 6719 ovnkube.go:137] failed to run ovnkube: [failed to start network 
contr\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:19:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-w2ctb_openshift-ovn-kubernetes(54c05b03-6747-47bf-a40d-8a9332c4d856)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursi
veReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.887173 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vnpwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5926107d-81bc-4e34-9e27-8018cbccf590\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vnpwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.898275 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.913276 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.924638 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.959439 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.959461 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.959470 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.959485 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.959494 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:03Z","lastTransitionTime":"2025-11-24T13:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:03 crc kubenswrapper[5039]: I1124 13:19:03.971573 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs\") pod \"network-metrics-daemon-vnpwt\" (UID: \"5926107d-81bc-4e34-9e27-8018cbccf590\") " pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:03 crc kubenswrapper[5039]: E1124 13:19:03.971673 5039 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 13:19:03 crc kubenswrapper[5039]: E1124 13:19:03.971717 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs podName:5926107d-81bc-4e34-9e27-8018cbccf590 nodeName:}" failed. No retries permitted until 2025-11-24 13:19:19.971702084 +0000 UTC m=+72.410826584 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs") pod "network-metrics-daemon-vnpwt" (UID: "5926107d-81bc-4e34-9e27-8018cbccf590") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.062643 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.062686 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.062695 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.062712 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.062727 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:04Z","lastTransitionTime":"2025-11-24T13:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.164849 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.164934 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.164959 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.164989 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.165006 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:04Z","lastTransitionTime":"2025-11-24T13:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.268323 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.268392 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.268408 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.268436 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.268455 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:04Z","lastTransitionTime":"2025-11-24T13:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.306383 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:04 crc kubenswrapper[5039]: E1124 13:19:04.306630 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.371535 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.371577 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.371585 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.371601 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.371610 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:04Z","lastTransitionTime":"2025-11-24T13:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.473387 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.473433 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.473444 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.473461 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.473473 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:04Z","lastTransitionTime":"2025-11-24T13:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.575456 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.575502 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.575532 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.575549 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.575559 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:04Z","lastTransitionTime":"2025-11-24T13:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.678154 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.678236 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.678254 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.678276 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.678293 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:04Z","lastTransitionTime":"2025-11-24T13:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.780216 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.780250 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.780258 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.780270 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.780280 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:04Z","lastTransitionTime":"2025-11-24T13:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.882287 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.882325 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.882336 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.882352 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.882364 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:04Z","lastTransitionTime":"2025-11-24T13:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.986130 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.986171 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.986183 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.986198 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:04 crc kubenswrapper[5039]: I1124 13:19:04.986212 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:04Z","lastTransitionTime":"2025-11-24T13:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.089060 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.089110 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.089121 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.089141 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.089154 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:05Z","lastTransitionTime":"2025-11-24T13:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.191785 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.191856 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.191870 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.191888 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.191899 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:05Z","lastTransitionTime":"2025-11-24T13:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.293853 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.293909 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.293922 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.293943 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.293956 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:05Z","lastTransitionTime":"2025-11-24T13:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.306072 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.306175 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:05 crc kubenswrapper[5039]: E1124 13:19:05.306203 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.306085 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:05 crc kubenswrapper[5039]: E1124 13:19:05.306350 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:05 crc kubenswrapper[5039]: E1124 13:19:05.306419 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.396554 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.396586 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.396595 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.396608 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.396617 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:05Z","lastTransitionTime":"2025-11-24T13:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.498758 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.498847 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.498881 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.498913 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.498938 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:05Z","lastTransitionTime":"2025-11-24T13:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.601216 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.601254 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.601263 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.601277 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.601287 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:05Z","lastTransitionTime":"2025-11-24T13:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.704746 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.704812 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.704835 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.704865 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.704885 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:05Z","lastTransitionTime":"2025-11-24T13:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.808093 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.808186 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.808217 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.808252 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.808277 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:05Z","lastTransitionTime":"2025-11-24T13:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.911321 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.911389 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.911406 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.911432 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:05 crc kubenswrapper[5039]: I1124 13:19:05.911450 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:05Z","lastTransitionTime":"2025-11-24T13:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.014546 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.014625 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.014650 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.014679 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.014702 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:06Z","lastTransitionTime":"2025-11-24T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.117096 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.117169 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.117191 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.117220 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.117244 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:06Z","lastTransitionTime":"2025-11-24T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.219253 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.219286 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.219297 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.219312 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.219323 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:06Z","lastTransitionTime":"2025-11-24T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.262097 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.262137 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.262147 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.262163 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.262178 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:06Z","lastTransitionTime":"2025-11-24T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:06 crc kubenswrapper[5039]: E1124 13:19:06.274978 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:06Z is after 
2025-08-24T17:21:41Z" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.279102 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.279154 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.279175 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.279200 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.279217 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:06Z","lastTransitionTime":"2025-11-24T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:06 crc kubenswrapper[5039]: E1124 13:19:06.291618 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:06Z is after 
2025-08-24T17:21:41Z" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.295704 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.295763 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.295780 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.295801 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.295819 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:06Z","lastTransitionTime":"2025-11-24T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:06 crc kubenswrapper[5039]: E1124 13:19:06.309581 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:06Z is after 
2025-08-24T17:21:41Z" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.312669 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.312707 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.312722 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.312744 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.312759 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:06Z","lastTransitionTime":"2025-11-24T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:06 crc kubenswrapper[5039]: E1124 13:19:06.323989 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:06Z is after 
2025-08-24T17:21:41Z" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.326804 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.326847 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.326862 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.326882 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.326897 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:06Z","lastTransitionTime":"2025-11-24T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.331163 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.331199 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:06 crc kubenswrapper[5039]: E1124 13:19:06.331324 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:06 crc kubenswrapper[5039]: E1124 13:19:06.331461 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:06 crc kubenswrapper[5039]: E1124 13:19:06.338292 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:06Z is after 
2025-08-24T17:21:41Z" Nov 24 13:19:06 crc kubenswrapper[5039]: E1124 13:19:06.338430 5039 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.342374 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.342434 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.342451 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.342471 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.342492 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:06Z","lastTransitionTime":"2025-11-24T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.446189 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.446248 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.446265 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.446289 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.446332 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:06Z","lastTransitionTime":"2025-11-24T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.549348 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.549397 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.549415 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.549438 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.549455 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:06Z","lastTransitionTime":"2025-11-24T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.652346 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.652419 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.652439 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.652472 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.652497 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:06Z","lastTransitionTime":"2025-11-24T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.756027 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.756093 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.756116 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.756147 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.756169 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:06Z","lastTransitionTime":"2025-11-24T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.860107 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.860159 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.860178 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.860202 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.860223 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:06Z","lastTransitionTime":"2025-11-24T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.962719 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.962823 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.962840 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.963334 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:06 crc kubenswrapper[5039]: I1124 13:19:06.963615 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:06Z","lastTransitionTime":"2025-11-24T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.065881 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.065931 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.065953 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.065977 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.065993 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:07Z","lastTransitionTime":"2025-11-24T13:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.169194 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.169439 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.169496 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.169566 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.169583 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:07Z","lastTransitionTime":"2025-11-24T13:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.271814 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.271863 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.271875 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.271893 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.271905 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:07Z","lastTransitionTime":"2025-11-24T13:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.306301 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:07 crc kubenswrapper[5039]: E1124 13:19:07.306487 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.306297 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 13:19:07 crc kubenswrapper[5039]: E1124 13:19:07.306721 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.374666 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.374717 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.374733 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.374755 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.374771 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:07Z","lastTransitionTime":"2025-11-24T13:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.478046 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.478112 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.478128 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.478154 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.478179 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:07Z","lastTransitionTime":"2025-11-24T13:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.581682 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.581787 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.581806 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.581833 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.581854 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:07Z","lastTransitionTime":"2025-11-24T13:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.684923 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.685001 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.685034 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.685074 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.685098 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:07Z","lastTransitionTime":"2025-11-24T13:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.787583 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.787646 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.787664 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.787687 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.787704 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:07Z","lastTransitionTime":"2025-11-24T13:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.890562 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.890629 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.890644 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.890662 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.890675 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:07Z","lastTransitionTime":"2025-11-24T13:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.994073 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.994140 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.994161 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.994196 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:07 crc kubenswrapper[5039]: I1124 13:19:07.994222 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:07Z","lastTransitionTime":"2025-11-24T13:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.097708 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.097779 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.097804 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.097833 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.097854 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:08Z","lastTransitionTime":"2025-11-24T13:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.201482 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.201595 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.201615 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.201639 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.201659 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:08Z","lastTransitionTime":"2025-11-24T13:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.304157 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.304609 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.304628 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.304652 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.304671 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:08Z","lastTransitionTime":"2025-11-24T13:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.305945 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.305969 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt"
Nov 24 13:19:08 crc kubenswrapper[5039]: E1124 13:19:08.306176 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:08 crc kubenswrapper[5039]: E1124 13:19:08.306346 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.326982 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.341313 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.371585 5039 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"message\\\":\\\" handler 2 for removal\\\\nI1124 13:19:02.267268 6719 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 13:19:02.267314 6719 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 13:19:02.267322 6719 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1124 13:19:02.267351 6719 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 13:19:02.267373 6719 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 13:19:02.267384 6719 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 13:19:02.267393 6719 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 13:19:02.267424 6719 factory.go:656] Stopping watch factory\\\\nI1124 13:19:02.267448 6719 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1124 13:19:02.267458 6719 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 13:19:02.267467 6719 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 13:19:02.267475 6719 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1124 13:19:02.267492 6719 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 13:19:02.268628 6719 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:19:02.268688 6719 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:19:02.268771 6719 ovnkube.go:137] failed to run ovnkube: [failed to start network contr\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:19:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-w2ctb_openshift-ovn-kubernetes(54c05b03-6747-47bf-a40d-8a9332c4d856)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.389021 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vnpwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5926107d-81bc-4e34-9e27-8018cbccf590\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vnpwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.407574 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.407619 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.407632 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.407651 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.407665 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:08Z","lastTransitionTime":"2025-11-24T13:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.409754 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.431903 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.447086 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.465200 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.488290 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c
987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.509620 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.509682 5039 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.509699 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.509723 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.509740 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:08Z","lastTransitionTime":"2025-11-24T13:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.511124 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.531223 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.552813 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.571287 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb6a0812c1e533294fbc653814a92d6672c2d3479d5b28c2c2d5dafae604916c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c8f2f7f29ea14926868534547c27d44a5ad0bb742fd042f42849590e34a54e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-v5nbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.592287 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cea2230e-9024-455b-87aa-1b4c5b188723\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6328891f72adff70742b2aa64842672875abde57dfd275453ddbc585af80f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://62e7a017d9a3276e864342729bdc35453bb95e9e469760efb6ea283ffb618228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5616706b755bdff47ccadc09ae036e231c76e5953f8c5af9ea9cf8f8e449c59c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d494836fe5bc8e8ae\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d494836fe5bc8e8ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.612681 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.612773 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.612797 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.612855 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.612875 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:08Z","lastTransitionTime":"2025-11-24T13:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.615731 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.635933 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.652361 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.716238 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.716303 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.716318 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.716339 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.716358 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:08Z","lastTransitionTime":"2025-11-24T13:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.819444 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.819564 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.819591 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.819620 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.819638 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:08Z","lastTransitionTime":"2025-11-24T13:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.921987 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.922064 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.922083 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.922109 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:08 crc kubenswrapper[5039]: I1124 13:19:08.922127 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:08Z","lastTransitionTime":"2025-11-24T13:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.025277 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.025332 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.025349 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.025371 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.025389 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:09Z","lastTransitionTime":"2025-11-24T13:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.128654 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.128726 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.128749 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.128779 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.128800 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:09Z","lastTransitionTime":"2025-11-24T13:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.231169 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.231229 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.231246 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.231271 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.231289 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:09Z","lastTransitionTime":"2025-11-24T13:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.306442 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.306491 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:09 crc kubenswrapper[5039]: E1124 13:19:09.306575 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:09 crc kubenswrapper[5039]: E1124 13:19:09.306697 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.333430 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.333483 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.333557 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.333581 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.333593 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:09Z","lastTransitionTime":"2025-11-24T13:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.436657 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.436729 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.436748 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.436788 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.436803 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:09Z","lastTransitionTime":"2025-11-24T13:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.540245 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.540301 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.540313 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.540329 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.540342 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:09Z","lastTransitionTime":"2025-11-24T13:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.643230 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.643326 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.643343 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.643368 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.643384 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:09Z","lastTransitionTime":"2025-11-24T13:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.745887 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.745966 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.745993 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.746024 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.746047 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:09Z","lastTransitionTime":"2025-11-24T13:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.848494 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.848624 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.848648 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.848674 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.848692 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:09Z","lastTransitionTime":"2025-11-24T13:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.951599 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.951664 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.951726 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.951754 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:09 crc kubenswrapper[5039]: I1124 13:19:09.951771 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:09Z","lastTransitionTime":"2025-11-24T13:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.054649 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.054735 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.054767 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.054804 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.054839 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:10Z","lastTransitionTime":"2025-11-24T13:19:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.158661 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.158738 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.158758 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.158782 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.158800 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:10Z","lastTransitionTime":"2025-11-24T13:19:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.261286 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.261337 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.261355 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.261379 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.261396 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:10Z","lastTransitionTime":"2025-11-24T13:19:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.305801 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.305822 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:10 crc kubenswrapper[5039]: E1124 13:19:10.305964 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:10 crc kubenswrapper[5039]: E1124 13:19:10.306164 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.367616 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.367677 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.367696 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.367723 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.367743 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:10Z","lastTransitionTime":"2025-11-24T13:19:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.476702 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.476756 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.476777 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.476802 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.476821 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:10Z","lastTransitionTime":"2025-11-24T13:19:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.579645 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.579710 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.579728 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.579752 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.579768 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:10Z","lastTransitionTime":"2025-11-24T13:19:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.684272 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.684534 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.684552 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.684578 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.684596 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:10Z","lastTransitionTime":"2025-11-24T13:19:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.787637 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.787694 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.787713 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.787737 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.787754 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:10Z","lastTransitionTime":"2025-11-24T13:19:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.891103 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.891185 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.891209 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.891241 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.891262 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:10Z","lastTransitionTime":"2025-11-24T13:19:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.995477 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.995662 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.995693 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.995722 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:10 crc kubenswrapper[5039]: I1124 13:19:10.995745 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:10Z","lastTransitionTime":"2025-11-24T13:19:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.099068 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.099147 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.099171 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.099205 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.099228 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:11Z","lastTransitionTime":"2025-11-24T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.201732 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.201808 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.201847 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.201877 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.201900 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:11Z","lastTransitionTime":"2025-11-24T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.305331 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.305378 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.305390 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.305408 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.305419 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:11Z","lastTransitionTime":"2025-11-24T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.305670 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.305733 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:11 crc kubenswrapper[5039]: E1124 13:19:11.305788 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:11 crc kubenswrapper[5039]: E1124 13:19:11.305945 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.408148 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.408225 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.408252 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.408283 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.408306 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:11Z","lastTransitionTime":"2025-11-24T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.511551 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.511615 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.511637 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.511666 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.511690 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:11Z","lastTransitionTime":"2025-11-24T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.614535 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.614574 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.614583 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.614599 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.614611 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:11Z","lastTransitionTime":"2025-11-24T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.717382 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.717438 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.717451 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.717472 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.717484 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:11Z","lastTransitionTime":"2025-11-24T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.819739 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.819782 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.819793 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.819810 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.819824 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:11Z","lastTransitionTime":"2025-11-24T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.923159 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.923216 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.923227 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.923255 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:11 crc kubenswrapper[5039]: I1124 13:19:11.923297 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:11Z","lastTransitionTime":"2025-11-24T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.026230 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.026271 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.026282 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.026298 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.026308 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:12Z","lastTransitionTime":"2025-11-24T13:19:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.130458 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.130575 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.130601 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.130634 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.130653 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:12Z","lastTransitionTime":"2025-11-24T13:19:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.233095 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.233135 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.233147 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.233166 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.233179 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:12Z","lastTransitionTime":"2025-11-24T13:19:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.306423 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.306468 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:12 crc kubenswrapper[5039]: E1124 13:19:12.306585 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:12 crc kubenswrapper[5039]: E1124 13:19:12.306687 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.335611 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.335647 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.335659 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.335673 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.335685 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:12Z","lastTransitionTime":"2025-11-24T13:19:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.438786 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.438840 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.438860 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.438882 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.438900 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:12Z","lastTransitionTime":"2025-11-24T13:19:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.541861 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.541930 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.541947 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.541971 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.541988 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:12Z","lastTransitionTime":"2025-11-24T13:19:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.644989 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.645077 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.645110 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.645147 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.645170 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:12Z","lastTransitionTime":"2025-11-24T13:19:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.747871 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.747923 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.747934 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.747950 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.747962 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:12Z","lastTransitionTime":"2025-11-24T13:19:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.850926 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.850990 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.851003 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.851020 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.851032 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:12Z","lastTransitionTime":"2025-11-24T13:19:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.953777 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.953819 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.953831 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.953846 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:12 crc kubenswrapper[5039]: I1124 13:19:12.953857 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:12Z","lastTransitionTime":"2025-11-24T13:19:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.056106 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.056144 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.056156 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.056173 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.056185 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:13Z","lastTransitionTime":"2025-11-24T13:19:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.158152 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.158238 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.158255 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.158271 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.158282 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:13Z","lastTransitionTime":"2025-11-24T13:19:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.261134 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.261168 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.261181 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.261198 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.261210 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:13Z","lastTransitionTime":"2025-11-24T13:19:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.305861 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.305906 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:13 crc kubenswrapper[5039]: E1124 13:19:13.306042 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:13 crc kubenswrapper[5039]: E1124 13:19:13.306158 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.363781 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.363852 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.363869 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.363894 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.363930 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:13Z","lastTransitionTime":"2025-11-24T13:19:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.466766 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.466810 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.466821 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.466838 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.466850 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:13Z","lastTransitionTime":"2025-11-24T13:19:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.570110 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.570173 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.570193 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.570219 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.570238 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:13Z","lastTransitionTime":"2025-11-24T13:19:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.673691 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.673941 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.674064 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.674160 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.674303 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:13Z","lastTransitionTime":"2025-11-24T13:19:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.777337 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.777401 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.777417 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.777441 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.777460 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:13Z","lastTransitionTime":"2025-11-24T13:19:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.879658 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.879710 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.879723 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.879740 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.879753 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:13Z","lastTransitionTime":"2025-11-24T13:19:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.982234 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.982270 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.982281 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.982296 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:13 crc kubenswrapper[5039]: I1124 13:19:13.982306 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:13Z","lastTransitionTime":"2025-11-24T13:19:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.085182 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.085227 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.085240 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.085257 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.085270 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:14Z","lastTransitionTime":"2025-11-24T13:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.187786 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.187835 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.187847 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.187864 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.187876 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:14Z","lastTransitionTime":"2025-11-24T13:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.290764 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.290812 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.290824 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.290843 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.290853 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:14Z","lastTransitionTime":"2025-11-24T13:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.306320 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.306383 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:14 crc kubenswrapper[5039]: E1124 13:19:14.306439 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:14 crc kubenswrapper[5039]: E1124 13:19:14.306648 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.392674 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.392705 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.392713 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.392726 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.392734 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:14Z","lastTransitionTime":"2025-11-24T13:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.495638 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.495697 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.495708 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.495726 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.495737 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:14Z","lastTransitionTime":"2025-11-24T13:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.598266 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.598331 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.598357 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.598393 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.598410 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:14Z","lastTransitionTime":"2025-11-24T13:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.700659 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.700699 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.700710 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.700726 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.700738 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:14Z","lastTransitionTime":"2025-11-24T13:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.803213 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.803255 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.803265 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.803280 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.803289 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:14Z","lastTransitionTime":"2025-11-24T13:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.906098 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.906127 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.906135 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.906149 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:14 crc kubenswrapper[5039]: I1124 13:19:14.906163 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:14Z","lastTransitionTime":"2025-11-24T13:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.008346 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.008383 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.008393 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.008409 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.008419 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:15Z","lastTransitionTime":"2025-11-24T13:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.111799 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.111841 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.111850 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.111865 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.111875 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:15Z","lastTransitionTime":"2025-11-24T13:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.215219 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.215299 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.215335 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.215368 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.215392 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:15Z","lastTransitionTime":"2025-11-24T13:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.306163 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:15 crc kubenswrapper[5039]: E1124 13:19:15.306290 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.306183 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:15 crc kubenswrapper[5039]: E1124 13:19:15.306553 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.318880 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.318911 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.318922 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.318937 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.318948 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:15Z","lastTransitionTime":"2025-11-24T13:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.420906 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.420955 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.420973 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.420996 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.421013 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:15Z","lastTransitionTime":"2025-11-24T13:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.523901 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.523958 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.523982 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.524011 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.524036 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:15Z","lastTransitionTime":"2025-11-24T13:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.626165 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.626213 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.626221 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.626236 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.626246 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:15Z","lastTransitionTime":"2025-11-24T13:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.728836 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.728878 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.728888 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.728904 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.728913 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:15Z","lastTransitionTime":"2025-11-24T13:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.831355 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.831393 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.831404 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.831420 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.831431 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:15Z","lastTransitionTime":"2025-11-24T13:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.934477 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.934740 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.934830 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.934911 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:15 crc kubenswrapper[5039]: I1124 13:19:15.935002 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:15Z","lastTransitionTime":"2025-11-24T13:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.036959 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.037068 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.037091 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.037119 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.037140 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:16Z","lastTransitionTime":"2025-11-24T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.139939 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.140022 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.140047 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.140083 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.140106 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:16Z","lastTransitionTime":"2025-11-24T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.242962 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.243052 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.243071 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.243096 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.243112 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:16Z","lastTransitionTime":"2025-11-24T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.306534 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.306612 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:16 crc kubenswrapper[5039]: E1124 13:19:16.306723 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.308141 5039 scope.go:117] "RemoveContainer" containerID="b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6" Nov 24 13:19:16 crc kubenswrapper[5039]: E1124 13:19:16.306897 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:16 crc kubenswrapper[5039]: E1124 13:19:16.308480 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-w2ctb_openshift-ovn-kubernetes(54c05b03-6747-47bf-a40d-8a9332c4d856)\"" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.345894 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.345933 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.345946 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.345963 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.345975 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:16Z","lastTransitionTime":"2025-11-24T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.448364 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.448622 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.448693 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.448768 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.448853 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:16Z","lastTransitionTime":"2025-11-24T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.551614 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.552486 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.552520 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.552539 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.552551 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:16Z","lastTransitionTime":"2025-11-24T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.654490 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.654571 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.654588 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.654612 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.654631 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:16Z","lastTransitionTime":"2025-11-24T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.690331 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.690428 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.690448 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.690468 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.690482 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:16Z","lastTransitionTime":"2025-11-24T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:16 crc kubenswrapper[5039]: E1124 13:19:16.709816 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.713942 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.714003 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.714023 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.714047 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.714065 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:16Z","lastTransitionTime":"2025-11-24T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:16 crc kubenswrapper[5039]: E1124 13:19:16.727171 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.730861 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.730907 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
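The retry loop above fails for one reason that is easy to miss inside the large patch payload: the node-identity webhook at https://127.0.0.1:9743 serves a TLS certificate whose notAfter date is 2025-08-24T17:21:41Z, while the node clock reads 2025-11-24. Below is a minimal diagnostic sketch, not part of the captured log, assuming Python 3 with the third-party cryptography package (version 42 or newer for the *_utc accessors) is available on the node and that the endpoint is reachable; it pulls the served certificate and repeats the same notAfter comparison that kubelet's x509 verification reports.

import socket
import ssl
from datetime import datetime, timezone

from cryptography import x509  # third-party; assumed installed for this sketch

# Endpoint taken from the log line: Post "https://127.0.0.1:9743/node?timeout=10s"
HOST, PORT = "127.0.0.1", 9743

# Disable verification so we can read the certificate even though it is expired.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

with socket.create_connection((HOST, PORT), timeout=5) as raw:
    with ctx.wrap_socket(raw, server_hostname=HOST) as tls:
        cert = x509.load_der_x509_certificate(tls.getpeercert(binary_form=True))

now = datetime.now(timezone.utc)
print("subject:  ", cert.subject.rfc4514_string())
print("notBefore:", cert.not_valid_before_utc)
print("notAfter: ", cert.not_valid_after_utc)
if now > cert.not_valid_after_utc:
    # Same condition kubelet logs: "current time ... is after <notAfter>".
    print("EXPIRED: current time", now.isoformat(), "is after notAfter")

On this node the sketch would flag notAfter as 2025-08-24T17:21:41Z; until that certificate is rotated (or the node clock corrected), every node-status patch bounces off the webhook, which is why the 13:19:16.727171, .747593, and .769306 attempts fail identically.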
event="NodeHasNoDiskPressure" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.730918 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.730936 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.730949 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:16Z","lastTransitionTime":"2025-11-24T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:16 crc kubenswrapper[5039]: E1124 13:19:16.747593 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.752555 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.752603 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.752617 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.752636 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.752648 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:16Z","lastTransitionTime":"2025-11-24T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:16 crc kubenswrapper[5039]: E1124 13:19:16.769306 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[…],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.773027 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.773049 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.773058 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.773071 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.773080 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:16Z","lastTransitionTime":"2025-11-24T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:16 crc kubenswrapper[5039]: E1124 13:19:16.784210 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[…],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:16 crc kubenswrapper[5039]: E1124 13:19:16.784392 5039 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.786347 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasSufficientMemory" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.786375 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.786386 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.786397 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.786405 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:16Z","lastTransitionTime":"2025-11-24T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.888957 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.888996 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.889006 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.889020 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.889031 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:16Z","lastTransitionTime":"2025-11-24T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.991453 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.991490 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.991521 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.991536 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:16 crc kubenswrapper[5039]: I1124 13:19:16.991546 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:16Z","lastTransitionTime":"2025-11-24T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.094405 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.094452 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.094465 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.094483 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.094494 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:17Z","lastTransitionTime":"2025-11-24T13:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.197290 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.197334 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.197366 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.197387 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.197398 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:17Z","lastTransitionTime":"2025-11-24T13:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.300558 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.300615 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.300632 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.300656 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.300672 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:17Z","lastTransitionTime":"2025-11-24T13:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.305701 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.305738 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:17 crc kubenswrapper[5039]: E1124 13:19:17.305809 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:17 crc kubenswrapper[5039]: E1124 13:19:17.305903 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.403149 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.403212 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.403273 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.403302 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.403325 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:17Z","lastTransitionTime":"2025-11-24T13:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.505717 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.505762 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.505810 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.505858 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.505872 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:17Z","lastTransitionTime":"2025-11-24T13:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.607546 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.607587 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.607599 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.607616 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.607628 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:17Z","lastTransitionTime":"2025-11-24T13:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.709784 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.710354 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.710547 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.710591 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.710617 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:17Z","lastTransitionTime":"2025-11-24T13:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.813281 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.813345 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.813357 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.813379 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.813391 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:17Z","lastTransitionTime":"2025-11-24T13:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.915364 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.915423 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.915440 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.915457 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:17 crc kubenswrapper[5039]: I1124 13:19:17.915470 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:17Z","lastTransitionTime":"2025-11-24T13:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.017471 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.017583 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.017609 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.017634 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.017653 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:18Z","lastTransitionTime":"2025-11-24T13:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.119467 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.119588 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.119614 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.119643 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.119663 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:18Z","lastTransitionTime":"2025-11-24T13:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.222107 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.222151 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.222164 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.222182 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.222194 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:18Z","lastTransitionTime":"2025-11-24T13:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.306671 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:18 crc kubenswrapper[5039]: E1124 13:19:18.306770 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.306969 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:18 crc kubenswrapper[5039]: E1124 13:19:18.307213 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.320428 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cea2230e-9024-455b-87aa-1b4c5b188723\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6328891f72adff70742b2aa64842672875abde57dfd275453ddbc585af80f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://62e7a017d9a3276e864342729bdc35453bb95e9e469760efb6ea283ffb618228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5616706b755bdff47ccadc09ae036e231c76e5953f8c5af9ea9cf8f8e449c59c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d494836fe5bc8e8ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d494836fe5bc8e8ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.324338 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.324418 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.324455 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.324495 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.324589 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:18Z","lastTransitionTime":"2025-11-24T13:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.333886 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.345156 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.356581 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.367745 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.381124 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.400944 5039 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"message\\\":\\\" handler 2 for removal\\\\nI1124 13:19:02.267268 6719 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 13:19:02.267314 6719 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 13:19:02.267322 6719 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1124 13:19:02.267351 6719 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 13:19:02.267373 6719 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 13:19:02.267384 6719 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 13:19:02.267393 6719 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 13:19:02.267424 6719 factory.go:656] Stopping watch factory\\\\nI1124 13:19:02.267448 6719 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1124 13:19:02.267458 6719 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 13:19:02.267467 6719 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 13:19:02.267475 6719 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1124 13:19:02.267492 6719 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 13:19:02.268628 6719 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:19:02.268688 6719 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:19:02.268771 6719 ovnkube.go:137] failed to run ovnkube: [failed to start network contr\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:19:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-w2ctb_openshift-ovn-kubernetes(54c05b03-6747-47bf-a40d-8a9332c4d856)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.412471 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vnpwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5926107d-81bc-4e34-9e27-8018cbccf590\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vnpwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.424401 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.426816 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.426845 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:18 crc 
kubenswrapper[5039]: I1124 13:19:18.426853 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.426866 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.426875 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:18Z","lastTransitionTime":"2025-11-24T13:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.434058 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 
24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.446577 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.459576 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.480636 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.492171 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.503359 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.514014 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb6a0812c1e533294fbc653814a92d6672c2d3479d5b28c2c2d5dafae604916c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c8f2f7f29ea14926868534547c27d44a5ad0bb742fd042f42849590e34a54e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-v5nbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.525274 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.528772 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.528805 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.528814 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.528830 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.528840 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:18Z","lastTransitionTime":"2025-11-24T13:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.631724 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.631775 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.631786 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.631801 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.631810 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:18Z","lastTransitionTime":"2025-11-24T13:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.735087 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.735145 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.735166 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.735197 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.735219 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:18Z","lastTransitionTime":"2025-11-24T13:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.837220 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.837267 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.837281 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.837300 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.837311 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:18Z","lastTransitionTime":"2025-11-24T13:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.940110 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.940165 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.940182 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.940207 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:18 crc kubenswrapper[5039]: I1124 13:19:18.940224 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:18Z","lastTransitionTime":"2025-11-24T13:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.042802 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.042840 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.042850 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.042867 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.042878 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:19Z","lastTransitionTime":"2025-11-24T13:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.145258 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.145313 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.145325 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.145342 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.145650 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:19Z","lastTransitionTime":"2025-11-24T13:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.247929 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.247959 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.247966 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.247979 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.247988 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:19Z","lastTransitionTime":"2025-11-24T13:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.306010 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.306068 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:19 crc kubenswrapper[5039]: E1124 13:19:19.306152 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:19 crc kubenswrapper[5039]: E1124 13:19:19.306205 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.350177 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.350214 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.350224 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.350239 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.350249 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:19Z","lastTransitionTime":"2025-11-24T13:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.451813 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.451847 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.451856 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.451870 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.451881 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:19Z","lastTransitionTime":"2025-11-24T13:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.553990 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.554041 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.554059 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.554081 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.554096 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:19Z","lastTransitionTime":"2025-11-24T13:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.656020 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.656071 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.656087 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.656114 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.656137 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:19Z","lastTransitionTime":"2025-11-24T13:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.758539 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.758581 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.758591 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.758606 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.758617 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:19Z","lastTransitionTime":"2025-11-24T13:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.861300 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.861337 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.861347 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.861362 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.861373 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:19Z","lastTransitionTime":"2025-11-24T13:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.964851 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.964913 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.964933 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.964959 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.964977 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:19Z","lastTransitionTime":"2025-11-24T13:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:19 crc kubenswrapper[5039]: I1124 13:19:19.987281 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs\") pod \"network-metrics-daemon-vnpwt\" (UID: \"5926107d-81bc-4e34-9e27-8018cbccf590\") " pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:19 crc kubenswrapper[5039]: E1124 13:19:19.987487 5039 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 13:19:19 crc kubenswrapper[5039]: E1124 13:19:19.987561 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs podName:5926107d-81bc-4e34-9e27-8018cbccf590 nodeName:}" failed. No retries permitted until 2025-11-24 13:19:51.987542083 +0000 UTC m=+104.426666593 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs") pod "network-metrics-daemon-vnpwt" (UID: "5926107d-81bc-4e34-9e27-8018cbccf590") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.068153 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.068214 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.068232 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.068258 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.068274 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:20Z","lastTransitionTime":"2025-11-24T13:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.170436 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.170543 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.170570 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.170606 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.170632 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:20Z","lastTransitionTime":"2025-11-24T13:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.273191 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.273257 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.273270 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.273287 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.273299 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:20Z","lastTransitionTime":"2025-11-24T13:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.306520 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:20 crc kubenswrapper[5039]: E1124 13:19:20.306662 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.306523 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:20 crc kubenswrapper[5039]: E1124 13:19:20.306871 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.375226 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.375278 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.375288 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.375303 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.375312 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:20Z","lastTransitionTime":"2025-11-24T13:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.477799 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.477841 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.477851 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.477868 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.477884 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:20Z","lastTransitionTime":"2025-11-24T13:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.580485 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.580546 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.580557 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.580574 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.580586 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:20Z","lastTransitionTime":"2025-11-24T13:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.683751 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.683789 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.683798 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.683813 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.683825 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:20Z","lastTransitionTime":"2025-11-24T13:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.786715 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.786769 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.786788 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.786813 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.786832 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:20Z","lastTransitionTime":"2025-11-24T13:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.889419 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.889458 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.889471 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.889485 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.889494 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:20Z","lastTransitionTime":"2025-11-24T13:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.992341 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.992385 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.992406 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.992425 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:20 crc kubenswrapper[5039]: I1124 13:19:20.992440 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:20Z","lastTransitionTime":"2025-11-24T13:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.095743 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.095808 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.095825 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.095849 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.095866 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:21Z","lastTransitionTime":"2025-11-24T13:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.198737 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.198786 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.198802 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.198825 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.198842 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:21Z","lastTransitionTime":"2025-11-24T13:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.301669 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.301717 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.301726 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.301752 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.301763 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:21Z","lastTransitionTime":"2025-11-24T13:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.306201 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.306250 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:21 crc kubenswrapper[5039]: E1124 13:19:21.306373 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:21 crc kubenswrapper[5039]: E1124 13:19:21.306709 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.404749 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.404818 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.404841 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.404874 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.404899 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:21Z","lastTransitionTime":"2025-11-24T13:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.507412 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.507462 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.507476 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.507497 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.507528 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:21Z","lastTransitionTime":"2025-11-24T13:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.609588 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.609649 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.609664 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.609684 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.609705 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:21Z","lastTransitionTime":"2025-11-24T13:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.711717 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.711848 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.711933 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.711957 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.711999 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:21Z","lastTransitionTime":"2025-11-24T13:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.716378 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kr94g_6c18c830-d513-4df0-be92-cd44f2d2c5df/kube-multus/0.log" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.716435 5039 generic.go:334] "Generic (PLEG): container finished" podID="6c18c830-d513-4df0-be92-cd44f2d2c5df" containerID="a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9" exitCode=1 Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.716471 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kr94g" event={"ID":"6c18c830-d513-4df0-be92-cd44f2d2c5df","Type":"ContainerDied","Data":"a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9"} Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.716935 5039 scope.go:117] "RemoveContainer" containerID="a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.731688 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cea2230e-9024-455b-87aa-1b4c5b188723\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6328891f72adff70742b2aa64842672875abde57dfd275453ddbc585af80f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://62e7a017d9a3276e864342729bdc35453bb95e9e469760efb6ea283ffb618228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-
pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5616706b755bdff47ccadc09ae036e231c76e5953f8c5af9ea9cf8f8e449c59c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d494836fe5bc8e8ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d494836fe5bc8e8ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.747621 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.761294 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.773280 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.782347 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.795054 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.811743 5039 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"message\\\":\\\" handler 2 for removal\\\\nI1124 13:19:02.267268 6719 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 13:19:02.267314 6719 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 13:19:02.267322 6719 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1124 13:19:02.267351 6719 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 13:19:02.267373 6719 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 13:19:02.267384 6719 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 13:19:02.267393 6719 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 13:19:02.267424 6719 factory.go:656] Stopping watch factory\\\\nI1124 13:19:02.267448 6719 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1124 13:19:02.267458 6719 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 13:19:02.267467 6719 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 13:19:02.267475 6719 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1124 13:19:02.267492 6719 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 13:19:02.268628 6719 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:19:02.268688 6719 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:19:02.268771 6719 ovnkube.go:137] failed to run ovnkube: [failed to start network contr\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:19:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-w2ctb_openshift-ovn-kubernetes(54c05b03-6747-47bf-a40d-8a9332c4d856)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:21Z is after 2025-08-24T17:21:41Z"
Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.814754 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.814913 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.814938 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.815331 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.815599 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:21Z","lastTransitionTime":"2025-11-24T13:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.827093 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vnpwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5926107d-81bc-4e34-9e27-8018cbccf590\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vnpwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:21Z is after 2025-08-24T17:21:41Z"
Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.842289 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:21Z is after 2025-08-24T17:21:41Z"
Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.851319 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:21Z is after 2025-08-24T17:21:41Z"
Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.862528 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:21Z is after 2025-08-24T17:21:41Z"
Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.873928 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.885103 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.899459 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.916260 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:19:20Z\\\",\\\"message\\\":\\\"2025-11-24T13:18:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_33ca652c-f1b7-4dbc-aa0e-a5c9a7a2c4bd\\\\n2025-11-24T13:18:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_33ca652c-f1b7-4dbc-aa0e-a5c9a7a2c4bd to /host/opt/cni/bin/\\\\n2025-11-24T13:18:35Z [verbose] multus-daemon 
started\\\\n2025-11-24T13:18:35Z [verbose] Readiness Indicator file check\\\\n2025-11-24T13:19:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.917856 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.917887 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.917896 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.917909 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.917918 5039 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:21Z","lastTransitionTime":"2025-11-24T13:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.929362 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb6a0812c1e533294fbc653814a92d6672c2d3479d5b28c2c2d5dafae604916c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c8f2f7f29ea14926868534547c27d44a5ad0bb742fd042f42849590e34a54e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"i
p\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-v5nbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:21 crc kubenswrapper[5039]: I1124 13:19:21.945780 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799
488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.020216 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.020260 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.020271 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.020291 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.020303 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:22Z","lastTransitionTime":"2025-11-24T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.123163 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.123201 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.123210 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.123225 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.123234 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:22Z","lastTransitionTime":"2025-11-24T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.225421 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.225479 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.225534 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.225555 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.225565 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:22Z","lastTransitionTime":"2025-11-24T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.306313 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.306374 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 13:19:22 crc kubenswrapper[5039]: E1124 13:19:22.306417 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590"
Nov 24 13:19:22 crc kubenswrapper[5039]: E1124 13:19:22.306632 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.328158 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.328196 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.328228 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.328243 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.328253 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:22Z","lastTransitionTime":"2025-11-24T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.431415 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.431467 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.431484 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.431538 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.431556 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:22Z","lastTransitionTime":"2025-11-24T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.534353 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.534405 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.534418 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.534435 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.534450 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:22Z","lastTransitionTime":"2025-11-24T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.637336 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.637381 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.637390 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.637407 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.637416 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:22Z","lastTransitionTime":"2025-11-24T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.722981 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kr94g_6c18c830-d513-4df0-be92-cd44f2d2c5df/kube-multus/0.log" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.723049 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kr94g" event={"ID":"6c18c830-d513-4df0-be92-cd44f2d2c5df","Type":"ContainerStarted","Data":"8f68c347316af28eef4d9d661fff4ef8497e81704ecbdb6794e54ba842a37e20"} Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.739310 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:22Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.740469 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.740550 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.740569 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.740590 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.740604 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:22Z","lastTransitionTime":"2025-11-24T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.757559 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running
\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:22Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.796820 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3aa1f3339a623bd233fdffa35dd23116312e76b
88797b318fce6b5f0ead4bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"message\\\":\\\" handler 2 for removal\\\\nI1124 13:19:02.267268 6719 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 13:19:02.267314 6719 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 13:19:02.267322 6719 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1124 13:19:02.267351 6719 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 13:19:02.267373 6719 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 13:19:02.267384 6719 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 13:19:02.267393 6719 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 13:19:02.267424 6719 factory.go:656] Stopping watch factory\\\\nI1124 13:19:02.267448 6719 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1124 13:19:02.267458 6719 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 13:19:02.267467 6719 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 13:19:02.267475 6719 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1124 13:19:02.267492 6719 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 13:19:02.268628 6719 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:19:02.268688 6719 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:19:02.268771 6719 ovnkube.go:137] failed to run ovnkube: [failed to start network contr\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:19:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-w2ctb_openshift-ovn-kubernetes(54c05b03-6747-47bf-a40d-8a9332c4d856)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:22Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.808319 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vnpwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5926107d-81bc-4e34-9e27-8018cbccf590\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vnpwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:22Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.823634 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:22Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.839044 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:22Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.843664 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.843701 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:22 crc 
kubenswrapper[5039]: I1124 13:19:22.843713 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.843735 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.843750 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:22Z","lastTransitionTime":"2025-11-24T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.854756 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:22Z is after 2025-08-24T17:21:41Z" Nov 
24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.874299 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs
\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:22Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.897066 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"cont
ainerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery 
information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:22Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:22 crc 
kubenswrapper[5039]: I1124 13:19:22.918079 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:22Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.936918 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:22Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.946650 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.946722 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.946746 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.946779 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.946802 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:22Z","lastTransitionTime":"2025-11-24T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.961814 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f68c347316af28eef4d9d661fff4ef8497e81704ecbdb6794e54ba842a37e20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:19:20Z\\\",\\\"message\\\":\\\"2025-11-24T13:18:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_33ca652c-f1b7-4dbc-aa0e-a5c9a7a2c4bd\\\\n2025-11-24T13:18:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_33ca652c-f1b7-4dbc-aa0e-a5c9a7a2c4bd to /host/opt/cni/bin/\\\\n2025-11-24T13:18:35Z [verbose] multus-daemon started\\\\n2025-11-24T13:18:35Z [verbose] Readiness Indicator file check\\\\n2025-11-24T13:19:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:22Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:22 crc kubenswrapper[5039]: I1124 13:19:22.980404 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb6a0812c1e533294fbc653814a92d6672c2d3479d5b28c2c2d5dafae604916c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c8f2f7f29ea14926868534547c27d44a5ad0bb742fd042f42849590e34a54e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-v5nbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:22Z is after 2025-08-24T17:21:41Z" Nov 24 
13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.000030 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cea2230e-9024-455b-87aa-1b4c5b188723\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6328891f72adff70742b2aa64842672875abde57dfd275453ddbc585af80f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://62e7a017d9a3276e864342729bdc35453bb95e9e469760efb6ea283ffb618228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5616706b755bdff47ccadc09ae036e231c76e5953f8c5af9ea9cf8f8e449c59c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d494836fe5bc8e8ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d494836fe5bc8e8ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:22Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.021704 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\
\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:23Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.037331 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:23Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.049823 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.050066 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.050198 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.050316 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.050418 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:23Z","lastTransitionTime":"2025-11-24T13:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.053278 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:23Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.154623 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.155086 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.155229 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.155414 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.155623 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:23Z","lastTransitionTime":"2025-11-24T13:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.259374 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.259447 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.259492 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.259571 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.259663 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:23Z","lastTransitionTime":"2025-11-24T13:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.305805 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.305906 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:23 crc kubenswrapper[5039]: E1124 13:19:23.305981 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:23 crc kubenswrapper[5039]: E1124 13:19:23.306035 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.363140 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.363242 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.363261 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.363284 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.363301 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:23Z","lastTransitionTime":"2025-11-24T13:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.466407 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.466491 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.466550 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.466579 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.466597 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:23Z","lastTransitionTime":"2025-11-24T13:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.569804 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.569849 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.569860 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.569876 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.569888 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:23Z","lastTransitionTime":"2025-11-24T13:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.674051 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.674129 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.674154 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.674185 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.674209 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:23Z","lastTransitionTime":"2025-11-24T13:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.777382 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.777442 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.777460 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.777485 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.777533 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:23Z","lastTransitionTime":"2025-11-24T13:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.880907 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.880963 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.880986 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.881030 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.881054 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:23Z","lastTransitionTime":"2025-11-24T13:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.983273 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.983339 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.983361 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.983391 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:23 crc kubenswrapper[5039]: I1124 13:19:23.983412 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:23Z","lastTransitionTime":"2025-11-24T13:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.086828 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.086889 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.086911 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.086939 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.086960 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:24Z","lastTransitionTime":"2025-11-24T13:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.189772 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.189857 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.189890 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.189921 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.189945 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:24Z","lastTransitionTime":"2025-11-24T13:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.293266 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.293333 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.293350 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.293373 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.293394 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:24Z","lastTransitionTime":"2025-11-24T13:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.306567 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.306625 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:24 crc kubenswrapper[5039]: E1124 13:19:24.306731 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:24 crc kubenswrapper[5039]: E1124 13:19:24.306840 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.397012 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.397485 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.397734 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.397906 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.398044 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:24Z","lastTransitionTime":"2025-11-24T13:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.500856 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.500926 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.500945 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.500973 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.500991 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:24Z","lastTransitionTime":"2025-11-24T13:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.604247 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.604321 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.604340 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.604365 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.604383 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:24Z","lastTransitionTime":"2025-11-24T13:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.707491 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.707620 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.707644 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.707673 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.707695 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:24Z","lastTransitionTime":"2025-11-24T13:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.811278 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.811340 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.811361 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.811384 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.811553 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:24Z","lastTransitionTime":"2025-11-24T13:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.915450 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.915527 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.915541 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.915559 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:24 crc kubenswrapper[5039]: I1124 13:19:24.915573 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:24Z","lastTransitionTime":"2025-11-24T13:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.019309 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.019383 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.019407 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.019432 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.019450 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:25Z","lastTransitionTime":"2025-11-24T13:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.122482 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.122608 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.122625 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.122652 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.122671 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:25Z","lastTransitionTime":"2025-11-24T13:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.226677 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.226726 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.226735 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.226751 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.226760 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:25Z","lastTransitionTime":"2025-11-24T13:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.305968 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.305968 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:25 crc kubenswrapper[5039]: E1124 13:19:25.306164 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:25 crc kubenswrapper[5039]: E1124 13:19:25.306280 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.329667 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.329752 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.329771 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.329802 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.329829 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:25Z","lastTransitionTime":"2025-11-24T13:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.433598 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.433668 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.433686 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.433712 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.433732 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:25Z","lastTransitionTime":"2025-11-24T13:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.537402 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.537460 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.537475 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.537499 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.537546 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:25Z","lastTransitionTime":"2025-11-24T13:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.640562 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.640608 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.640616 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.640632 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.640642 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:25Z","lastTransitionTime":"2025-11-24T13:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.742699 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.742749 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.742764 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.742783 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.742796 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:25Z","lastTransitionTime":"2025-11-24T13:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.845921 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.845979 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.845988 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.846002 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.846013 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:25Z","lastTransitionTime":"2025-11-24T13:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.948184 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.948242 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.948253 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.948271 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:25 crc kubenswrapper[5039]: I1124 13:19:25.948283 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:25Z","lastTransitionTime":"2025-11-24T13:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.051288 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.051366 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.051400 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.051428 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.051450 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:26Z","lastTransitionTime":"2025-11-24T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.154720 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.154782 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.154798 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.154989 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.155011 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:26Z","lastTransitionTime":"2025-11-24T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.258025 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.258167 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.258248 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.258284 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.258404 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:26Z","lastTransitionTime":"2025-11-24T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.305710 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.305788 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:26 crc kubenswrapper[5039]: E1124 13:19:26.305939 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:26 crc kubenswrapper[5039]: E1124 13:19:26.306161 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.360922 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.360972 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.360988 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.361006 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.361018 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:26Z","lastTransitionTime":"2025-11-24T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.463652 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.463701 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.463712 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.463727 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.463738 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:26Z","lastTransitionTime":"2025-11-24T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.567211 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.567284 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.567307 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.567339 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.567360 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:26Z","lastTransitionTime":"2025-11-24T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.671461 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.671590 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.671624 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.671654 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.671677 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:26Z","lastTransitionTime":"2025-11-24T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.775190 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.775580 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.775602 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.775628 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.775647 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:26Z","lastTransitionTime":"2025-11-24T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.878692 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.878772 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.878790 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.878816 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.878835 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:26Z","lastTransitionTime":"2025-11-24T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.931396 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.931455 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.931476 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.931541 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.931561 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:26Z","lastTransitionTime":"2025-11-24T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:26 crc kubenswrapper[5039]: E1124 13:19:26.953956 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:26Z is after 2025-08-24T17:21:41Z"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.960040 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.960109 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.960129 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.960156 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.960173 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:26Z","lastTransitionTime":"2025-11-24T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.987347 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.987428 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.987447 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.987483 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:26 crc kubenswrapper[5039]: I1124 13:19:26.987529 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:26Z","lastTransitionTime":"2025-11-24T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.014948 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.016042 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.016381 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.016748 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.016902 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:27Z","lastTransitionTime":"2025-11-24T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.039903 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.040036 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.040120 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.040206 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.040305 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:27Z","lastTransitionTime":"2025-11-24T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:27 crc kubenswrapper[5039]: E1124 13:19:27.052637 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:27Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:27 crc kubenswrapper[5039]: E1124 13:19:27.052931 5039 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.054472 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.054678 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.054728 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.054765 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.054789 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:27Z","lastTransitionTime":"2025-11-24T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.157954 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.158037 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.158060 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.158086 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.158102 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:27Z","lastTransitionTime":"2025-11-24T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.261603 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.261674 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.261696 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.261722 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.261740 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:27Z","lastTransitionTime":"2025-11-24T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.306421 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.306582 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:27 crc kubenswrapper[5039]: E1124 13:19:27.306656 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:27 crc kubenswrapper[5039]: E1124 13:19:27.306767 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.307775 5039 scope.go:117] "RemoveContainer" containerID="b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.365262 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.365318 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.365338 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.365366 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.365384 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:27Z","lastTransitionTime":"2025-11-24T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.468367 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.468434 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.468457 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.468485 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.468537 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:27Z","lastTransitionTime":"2025-11-24T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.570607 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.570665 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.570689 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.570718 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.570740 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:27Z","lastTransitionTime":"2025-11-24T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.674972 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.675002 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.675011 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.675027 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.675038 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:27Z","lastTransitionTime":"2025-11-24T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.742292 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w2ctb_54c05b03-6747-47bf-a40d-8a9332c4d856/ovnkube-controller/2.log" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.744637 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerStarted","Data":"217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce"} Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.745128 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.762167 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f68c347316af28eef4d9d661fff4ef8497e81704ecbdb6794e54ba842a37e20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:19:20Z\\\",\\\"message\\\":\\\"2025-11-24T13:18:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_33ca652c-f1b7-4dbc-aa0e-a5c9a7a2c4bd\\\\n2025-11-24T13:18:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_33ca652c-f1b7-4dbc-aa0e-a5c9a7a2c4bd to /host/opt/cni/bin/\\\\n2025-11-24T13:18:35Z [verbose] multus-daemon started\\\\n2025-11-24T13:18:35Z [verbose] Readiness Indicator file check\\\\n2025-11-24T13:19:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:27Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.774026 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb6a0812c1e533294fbc653814a92d6672c2d3479d5b28c2c2d5dafae604916c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c8f2f7f29ea14926868534547c27d44a5ad0bb742fd042f42849590e34a54e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-v5nbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:27Z is after 2025-08-24T17:21:41Z" Nov 24 
13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.777495 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.777559 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.777570 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.777584 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.777593 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:27Z","lastTransitionTime":"2025-11-24T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.786968 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"rea
dy\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:27Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.799920 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:27Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.816893 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:27Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.833354 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:27Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.844718 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:27Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.854991 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cea2230e-9024-455b-87aa-1b4c5b188723\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6328891f72adff70742b2aa64842672875abde57dfd275453ddbc585af80f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://62e7a017d9a3276e864342729bdc35453bb95e9e469760efb6ea283ffb618228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5616706b755bdff47ccadc09ae036e231c76e5953f8c5af9ea9cf8f8e449c59c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d4
94836fe5bc8e8ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d494836fe5bc8e8ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:27Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.867603 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\
"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:27Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.879456 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:27Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.879576 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.879604 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.879615 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.879631 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.879642 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:27Z","lastTransitionTime":"2025-11-24T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.889088 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vnpwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5926107d-81bc-4e34-9e27-8018cbccf590\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vnpwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:27Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.899419 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:27Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.913415 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:27Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.929987 5039 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"message\\\":\\\" handler 2 for removal\\\\nI1124 13:19:02.267268 6719 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 13:19:02.267314 6719 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 13:19:02.267322 6719 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1124 13:19:02.267351 6719 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 13:19:02.267373 6719 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 13:19:02.267384 6719 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 13:19:02.267393 6719 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 13:19:02.267424 6719 factory.go:656] Stopping watch factory\\\\nI1124 13:19:02.267448 6719 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1124 13:19:02.267458 6719 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 13:19:02.267467 6719 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 13:19:02.267475 6719 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1124 13:19:02.267492 6719 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 13:19:02.268628 6719 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:19:02.268688 6719 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:19:02.268771 6719 ovnkube.go:137] failed to run ovnkube: [failed to start network 
contr\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:19:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:19:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"
containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:27Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.941758 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" 
for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:27Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.956365 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-rel
ease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\
":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:27Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.966237 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:27Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.981945 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.981976 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.981984 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.981997 5039 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:27 crc kubenswrapper[5039]: I1124 13:19:27.982006 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:27Z","lastTransitionTime":"2025-11-24T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.084418 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.084534 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.084543 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.084556 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.084564 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:28Z","lastTransitionTime":"2025-11-24T13:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.187140 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.187197 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.187213 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.187227 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.187236 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:28Z","lastTransitionTime":"2025-11-24T13:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.291635 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.291692 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.291708 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.291729 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.291744 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:28Z","lastTransitionTime":"2025-11-24T13:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.306669 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.306700 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:28 crc kubenswrapper[5039]: E1124 13:19:28.306993 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:28 crc kubenswrapper[5039]: E1124 13:19:28.307101 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.319657 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.323132 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.342681 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.355324 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.374709 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f68c347316af28eef4d9d661fff4ef8497e81704ecbdb6794e54ba842a37e20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:19:20Z\\\",\\\"message\\\":\\\"2025-11-24T13:18:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_33ca652c-f1b7-4dbc-aa0e-a5c9a7a2c4bd\\\\n2025-11-24T13:18:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_33ca652c-f1b7-4dbc-aa0e-a5c9a7a2c4bd to /host/opt/cni/bin/\\\\n2025-11-24T13:18:35Z [verbose] multus-daemon started\\\\n2025-11-24T13:18:35Z [verbose] Readiness Indicator file check\\\\n2025-11-24T13:19:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.391574 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb6a0812c1e533294fbc653814a92d6672c2d3479d5b28c2c2d5dafae604916c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c8f2f7f29ea14926868534547c27d44a5ad0bb742fd042f42849590e34a54e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-v5nbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 
13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.394143 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.394175 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.394185 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.394203 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.394215 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:28Z","lastTransitionTime":"2025-11-24T13:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.410687 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"rea
dy\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.422436 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.438486 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.451901 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.465116 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.481993 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cea2230e-9024-455b-87aa-1b4c5b188723\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6328891f72adff70742b2aa64842672875abde57dfd275453ddbc585af80f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://62e7a017d9a3276e864342729bdc35453bb95e9e469760efb6ea283ffb618228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5616706b755bdff47ccadc09ae036e231c76e5953f8c5af9ea9cf8f8e449c59c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d4
94836fe5bc8e8ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d494836fe5bc8e8ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.496724 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.496758 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.496769 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.496794 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.496805 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:28Z","lastTransitionTime":"2025-11-24T13:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.499282 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.513622 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.525869 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vnpwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5926107d-81bc-4e34-9e27-8018cbccf590\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vnpwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.542127 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.554470 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.576033 5039 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"message\\\":\\\" handler 2 for removal\\\\nI1124 13:19:02.267268 6719 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 13:19:02.267314 6719 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 13:19:02.267322 6719 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1124 13:19:02.267351 6719 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 13:19:02.267373 6719 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 13:19:02.267384 6719 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 13:19:02.267393 6719 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 13:19:02.267424 6719 factory.go:656] Stopping watch factory\\\\nI1124 13:19:02.267448 6719 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1124 13:19:02.267458 6719 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 13:19:02.267467 6719 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 13:19:02.267475 6719 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1124 13:19:02.267492 6719 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 13:19:02.268628 6719 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:19:02.268688 6719 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:19:02.268771 6719 ovnkube.go:137] failed to run ovnkube: [failed to start network 
contr\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:19:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:19:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"
containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.598981 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.599032 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.599043 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.599062 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.599074 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:28Z","lastTransitionTime":"2025-11-24T13:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.701644 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.701705 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.701722 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.701746 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.701764 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:28Z","lastTransitionTime":"2025-11-24T13:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.751346 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w2ctb_54c05b03-6747-47bf-a40d-8a9332c4d856/ovnkube-controller/3.log" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.752096 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w2ctb_54c05b03-6747-47bf-a40d-8a9332c4d856/ovnkube-controller/2.log" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.755138 5039 generic.go:334] "Generic (PLEG): container finished" podID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerID="217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce" exitCode=1 Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.755185 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerDied","Data":"217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce"} Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.755266 5039 scope.go:117] "RemoveContainer" containerID="b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.756204 5039 scope.go:117] "RemoveContainer" containerID="217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce" Nov 24 13:19:28 crc kubenswrapper[5039]: E1124 13:19:28.756417 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-w2ctb_openshift-ovn-kubernetes(54c05b03-6747-47bf-a40d-8a9332c4d856)\"" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.771333 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.786294 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.795153 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.804407 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.804437 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.804448 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.804463 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.804473 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:28Z","lastTransitionTime":"2025-11-24T13:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.806975 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.819263 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.829222 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.838481 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.850986 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f68c347316af28eef4d9d661fff4ef8497e81704ecbdb6794e54ba842a37e20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:19:20Z\\\",\\\"message\\\":\\\"2025-11-24T13:18:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_33ca652c-f1b7-4dbc-aa0e-a5c9a7a2c4bd\\\\n2025-11-24T13:18:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_33ca652c-f1b7-4dbc-aa0e-a5c9a7a2c4bd to /host/opt/cni/bin/\\\\n2025-11-24T13:18:35Z [verbose] multus-daemon started\\\\n2025-11-24T13:18:35Z [verbose] Readiness Indicator file check\\\\n2025-11-24T13:19:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.860390 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb6a0812c1e533294fbc653814a92d6672c2d3479d5b28c2c2d5dafae604916c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c8f2f7f29ea14926868534547c27d44a5ad0bb742fd042f42849590e34a54e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-v5nbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 
13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.869744 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cea2230e-9024-455b-87aa-1b4c5b188723\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6328891f72adff70742b2aa64842672875abde57dfd275453ddbc585af80f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://62e7a017d9a3276e864342729bdc35453bb95e9e469760efb6ea283ffb618228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5616706b755bdff47ccadc09ae036e231c76e5953f8c5af9ea9cf8f8e449c59c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d494836fe5bc8e8ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d494836fe5bc8e8ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.880416 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\
\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.890177 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.900971 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.906679 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.906725 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.906734 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.906747 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.906756 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:28Z","lastTransitionTime":"2025-11-24T13:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.909399 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa79933a-8fd0-4f1f-b467-7d065b56bf03\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8081ca1b2aa80ea0bde26f9fb01960953127daffbbbdbe49afdf7af9a337a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3e22af4a2ec7fcbde66ae997e42b5d809457981a7e33e0f6505ab9c3c1f35f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e22af4a2ec7fcbde66ae997e42b5d809457981a7e33e0f6505ab9c3c1f35f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24
T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.918089 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.927895 5039 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 
13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.942802 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868
da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3aa1f3339a623bd233fdffa35dd23116312e76b88797b318fce6b5f0ead4bb6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"message\\\":\\\" handler 2 for removal\\\\nI1124 13:19:02.267268 6719 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 13:19:02.267314 6719 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 13:19:02.267322 6719 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1124 13:19:02.267351 6719 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 13:19:02.267373 6719 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 13:19:02.267384 6719 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 13:19:02.267393 6719 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 13:19:02.267424 6719 factory.go:656] Stopping watch factory\\\\nI1124 13:19:02.267448 6719 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1124 13:19:02.267458 6719 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 13:19:02.267467 6719 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 13:19:02.267475 6719 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1124 13:19:02.267492 6719 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 13:19:02.268628 6719 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:19:02.268688 6719 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:19:02.268771 6719 ovnkube.go:137] failed to run ovnkube: [failed to start network 
contr\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:19:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:19:28Z\\\",\\\"message\\\":\\\"amespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:19:28.098252 7064 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:19:28.098344 7064 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 13:19:28.098348 7064 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:19:28.101435 7064 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 13:19:28.101460 7064 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 13:19:28.101490 7064 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 13:19:28.101556 7064 factory.go:656] Stopping watch factory\\\\nI1124 13:19:28.101575 7064 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 13:19:28.111240 7064 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1124 13:19:28.111261 7064 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1124 13:19:28.111307 7064 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:19:28.111324 7064 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:19:28.111414 7064 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:19:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:28 crc kubenswrapper[5039]: I1124 13:19:28.953588 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vnpwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5926107d-81bc-4e34-9e27-8018cbccf590\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vnpwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.008366 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.008421 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.008438 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.008461 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.008479 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:29Z","lastTransitionTime":"2025-11-24T13:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.110910 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.110953 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.110961 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.111006 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.111016 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:29Z","lastTransitionTime":"2025-11-24T13:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.214214 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.214247 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.214255 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.214268 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.214276 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:29Z","lastTransitionTime":"2025-11-24T13:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.306683 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.306759 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 13:19:29 crc kubenswrapper[5039]: E1124 13:19:29.306835 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 13:19:29 crc kubenswrapper[5039]: E1124 13:19:29.306939 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.317193 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.317293 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.317314 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.317368 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.317398 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:29Z","lastTransitionTime":"2025-11-24T13:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.419447 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.419485 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.419494 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.419536 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.419546 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:29Z","lastTransitionTime":"2025-11-24T13:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.522296 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.522366 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.522390 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.522434 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.522461 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:29Z","lastTransitionTime":"2025-11-24T13:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.624217 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.624260 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.624271 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.624287 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.624298 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:29Z","lastTransitionTime":"2025-11-24T13:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.726573 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.726643 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.726664 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.726692 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.726709 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:29Z","lastTransitionTime":"2025-11-24T13:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.760535 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w2ctb_54c05b03-6747-47bf-a40d-8a9332c4d856/ovnkube-controller/3.log"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.765402 5039 scope.go:117] "RemoveContainer" containerID="217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce"
Nov 24 13:19:29 crc kubenswrapper[5039]: E1124 13:19:29.765866 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-w2ctb_openshift-ovn-kubernetes(54c05b03-6747-47bf-a40d-8a9332c4d856)\"" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.788299 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b127a51563ad051932d3b6e86eab38aae9536eb01092f82e64dfbaf59c205\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:29Z is after 2025-08-24T17:21:41Z"
Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.812797 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q77mz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c35ff00-6898-4235-af87-d46e63a20111\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6551cbed0599ea7476dde27fc8a80ebc9419c8ee712c2a29bb47e122b656485\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce7603f7e8f7d6b146e0494ffa28e838c00ad0d9240635a8ccf443e08f0f82c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d86bb8f1843eee5015daf7d06549ab720d6882465bbd1f4294e3088287f37b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3929fcdeee20c1dfb513f92106e66d14b88f1af3a5f7074ea4bf9cdb56220eec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8b9cb7875f4471d45b2fd3032e9d978d858d728bf03b50bed2c111742fe611d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://532a86bf4e7c95702b1d9899c95e056f7af1bd44093a2b1cf2163b71d66f33ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd69cb9f5b435a1b88e1ef71d7b1a6a37b22b1b171e0a5e5971b7af48e027967\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-srmdr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q77mz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:29Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.823746 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dsj42" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88879497-9ba4-4940-975d-d872f5fcccc9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcca90dac36091ea26929deb7507d064c8fb068432b589760a2e8908cc805a18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz78s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:35Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dsj42\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:29Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.829045 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.829116 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.829140 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.829173 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.829200 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:29Z","lastTransitionTime":"2025-11-24T13:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.839544 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:29Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.859805 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:29Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.880089 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:29Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.898716 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:29Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.916841 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f68c347316af28eef4d9d661fff4ef8497e81704ecbdb6794e54ba842a37e20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:19:20Z\\\",\\\"message\\\":\\\"2025-11-24T13:18:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_33ca652c-f1b7-4dbc-aa0e-a5c9a7a2c4bd\\\\n2025-11-24T13:18:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_33ca652c-f1b7-4dbc-aa0e-a5c9a7a2c4bd to /host/opt/cni/bin/\\\\n2025-11-24T13:18:35Z [verbose] multus-daemon started\\\\n2025-11-24T13:18:35Z [verbose] Readiness Indicator file check\\\\n2025-11-24T13:19:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:29Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.932186 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.932227 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.932241 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.932261 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.932277 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:29Z","lastTransitionTime":"2025-11-24T13:19:29Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.933480 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2c72781-6c62-4ca8-abee-e9e692ab4a3e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb6a0812c1e533294fbc653814a92d6672c2d3479d5b28c2c2d5dafae604916c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c8f2f7f29ea14926868534547c27d44a5ad0bb742fd042f42849590e34a54e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lfmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-
11-24T13:18:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-v5nbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:29Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.948371 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cea2230e-9024-455b-87aa-1b4c5b188723\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6328891f72adff70742b2aa64842672875abde57dfd275453ddbc585af80f9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://62e7a017d9a3276e864342729bdc35453bb95e9e469760efb6ea283ffb618228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5616706b755bdff47ccadc09ae036e231c76e5953f8c5af9ea9cf8f8e449c59c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\
\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d494836fe5bc8e8ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20ee53de3542227f11a8bdc220d94951c15be869a4fa9b4d494836fe5bc8e8ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:29Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.965992 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9761abc84ab50f0f82fa3c63989d0bb7786798a506ad0f238bc0606188e9fea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ceb0320cd3ef33304f5f7733c7ce96d35f2c5bd77bcb4aaf43b86f29a8bbc2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:29Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:29 crc kubenswrapper[5039]: I1124 13:19:29.982475 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:29Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.012146 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20cc7fe8ea75363a7348eeb38e521ee98f04750ff3e53ab12b6d3f6de8fff044\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.035255 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.035292 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.035303 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.035319 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.035332 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:30Z","lastTransitionTime":"2025-11-24T13:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.044440 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa79933a-8fd0-4f1f-b467-7d065b56bf03\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8081ca1b2aa80ea0bde26f9fb01960953127daffbbbdbe49afdf7af9a337a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3e22af4a2ec7fcbde66ae997e42b5d809457981a7e33e0f6505ab9c3c1f35f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e22af4a2ec7fcbde66ae997e42b5d809457981a7e33e0f6505ab9c3c1f35f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.056223 5039 
status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k79vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8413bf0a-e541-473a-ae4a-155c6f91b570\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07dfc8648defdf4b7ee17fbf2f22add20b8c5dc102dad3d0d00b6813d2165e3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whj5m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k79vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.068676 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ce86b4cd-2cb0-4cec-8b42-22a855734a60\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8e64c36de8e4ef24b03faf29179e28925b0773580796db155adb0d6194634e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q2rcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8x5rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.091678 5039 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54c05b03-6747-47bf-a40d-8a9332c4d856\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:19:28Z\\\",\\\"message\\\":\\\"amespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:19:28.098252 7064 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:19:28.098344 7064 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 13:19:28.098348 7064 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1124 13:19:28.101435 7064 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 13:19:28.101460 7064 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 13:19:28.101490 7064 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 13:19:28.101556 7064 factory.go:656] Stopping watch factory\\\\nI1124 13:19:28.101575 7064 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 13:19:28.111240 7064 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1124 13:19:28.111261 7064 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1124 13:19:28.111307 7064 ovnkube.go:599] Stopped ovnkube\\\\nI1124 13:19:28.111324 7064 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 13:19:28.111414 7064 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:19:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-w2ctb_openshift-ovn-kubernetes(54c05b03-6747-47bf-a40d-8a9332c4d856)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fhfzg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w2ctb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.106134 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vnpwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5926107d-81bc-4e34-9e27-8018cbccf590\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wprl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vnpwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:30Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.139629 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.139685 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.139703 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.139725 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.139742 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:30Z","lastTransitionTime":"2025-11-24T13:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.242738 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.242816 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.242841 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.242874 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.242897 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:30Z","lastTransitionTime":"2025-11-24T13:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.306015 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:30 crc kubenswrapper[5039]: E1124 13:19:30.306238 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.306294 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:30 crc kubenswrapper[5039]: E1124 13:19:30.306595 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.345521 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.345569 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.345582 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.345602 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.345617 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:30Z","lastTransitionTime":"2025-11-24T13:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.449159 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.449672 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.449871 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.450101 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.450318 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:30Z","lastTransitionTime":"2025-11-24T13:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.552856 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.552896 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.552906 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.552923 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.552935 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:30Z","lastTransitionTime":"2025-11-24T13:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.656018 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.656069 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.656085 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.656106 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.656122 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:30Z","lastTransitionTime":"2025-11-24T13:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.758801 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.758865 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.758901 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.758930 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.758950 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:30Z","lastTransitionTime":"2025-11-24T13:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.861357 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.861432 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.861458 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.861489 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.861550 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:30Z","lastTransitionTime":"2025-11-24T13:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.965538 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.965595 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.965614 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.965640 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:30 crc kubenswrapper[5039]: I1124 13:19:30.965658 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:30Z","lastTransitionTime":"2025-11-24T13:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.069238 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.069296 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.069315 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.069339 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.069360 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:31Z","lastTransitionTime":"2025-11-24T13:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.172545 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.172616 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.172633 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.172660 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.172678 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:31Z","lastTransitionTime":"2025-11-24T13:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.275587 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.275636 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.275648 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.275664 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.275675 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:31Z","lastTransitionTime":"2025-11-24T13:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.306188 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.306325 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:31 crc kubenswrapper[5039]: E1124 13:19:31.306356 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:31 crc kubenswrapper[5039]: E1124 13:19:31.306559 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.378769 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.378812 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.378825 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.378842 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.378856 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:31Z","lastTransitionTime":"2025-11-24T13:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.482367 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.482430 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.482497 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.482560 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.482582 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:31Z","lastTransitionTime":"2025-11-24T13:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.585988 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.586057 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.586073 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.586109 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.586127 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:31Z","lastTransitionTime":"2025-11-24T13:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.688808 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.688859 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.688871 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.688893 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.688911 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:31Z","lastTransitionTime":"2025-11-24T13:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.791429 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.791582 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.791607 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.791687 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.791754 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:31Z","lastTransitionTime":"2025-11-24T13:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.894477 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.894558 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.894576 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.894599 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.894634 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:31Z","lastTransitionTime":"2025-11-24T13:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.997281 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.997365 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.997389 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.997418 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:31 crc kubenswrapper[5039]: I1124 13:19:31.997442 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:31Z","lastTransitionTime":"2025-11-24T13:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.100210 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.100299 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.100317 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.100346 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.100365 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:32Z","lastTransitionTime":"2025-11-24T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.203368 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.203457 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.203475 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.203541 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.203563 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:32Z","lastTransitionTime":"2025-11-24T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.305914 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.306099 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.306287 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.306360 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.306387 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.306417 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.306442 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:32Z","lastTransitionTime":"2025-11-24T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:32 crc kubenswrapper[5039]: E1124 13:19:32.306403 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:32 crc kubenswrapper[5039]: E1124 13:19:32.306634 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.410128 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.410207 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.410245 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.410378 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.410465 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:32Z","lastTransitionTime":"2025-11-24T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.513093 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.513161 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.513183 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.513217 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.513242 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:32Z","lastTransitionTime":"2025-11-24T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.615684 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.615940 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.616064 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.616145 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.616224 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:32Z","lastTransitionTime":"2025-11-24T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.718665 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.718721 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.718741 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.718770 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.718789 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:32Z","lastTransitionTime":"2025-11-24T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.821261 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.821302 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.821316 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.821348 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.821364 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:32Z","lastTransitionTime":"2025-11-24T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.924013 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.924041 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.924052 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.924084 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:32 crc kubenswrapper[5039]: I1124 13:19:32.924096 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:32Z","lastTransitionTime":"2025-11-24T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.026271 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.026314 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.026324 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.026339 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.026351 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:33Z","lastTransitionTime":"2025-11-24T13:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.127995 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:19:33 crc kubenswrapper[5039]: E1124 13:19:33.128648 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.128337612 +0000 UTC m=+149.567462142 (durationBeforeRetry 1m4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.130059 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.130129 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.130152 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.130180 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.130195 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:33Z","lastTransitionTime":"2025-11-24T13:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.229772 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.229868 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.229913 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.229955 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:33 crc kubenswrapper[5039]: E1124 13:19:33.230010 5039 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object 
"openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 13:19:33 crc kubenswrapper[5039]: E1124 13:19:33.230086 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 13:19:33 crc kubenswrapper[5039]: E1124 13:19:33.230111 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 13:19:33 crc kubenswrapper[5039]: E1124 13:19:33.230131 5039 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:19:33 crc kubenswrapper[5039]: E1124 13:19:33.230133 5039 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 13:19:33 crc kubenswrapper[5039]: E1124 13:19:33.230114 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.230086097 +0000 UTC m=+149.669210637 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 13:19:33 crc kubenswrapper[5039]: E1124 13:19:33.230200 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.230181929 +0000 UTC m=+149.669306469 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:19:33 crc kubenswrapper[5039]: E1124 13:19:33.230214 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 13:19:33 crc kubenswrapper[5039]: E1124 13:19:33.230243 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.23021671 +0000 UTC m=+149.669341240 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 13:19:33 crc kubenswrapper[5039]: E1124 13:19:33.230250 5039 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 13:19:33 crc kubenswrapper[5039]: E1124 13:19:33.230272 5039 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:19:33 crc kubenswrapper[5039]: E1124 13:19:33.230329 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.230309133 +0000 UTC m=+149.669433693 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.232771 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.232832 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.232855 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.232887 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.232912 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:33Z","lastTransitionTime":"2025-11-24T13:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.305712 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:33 crc kubenswrapper[5039]: E1124 13:19:33.305896 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.305712 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:33 crc kubenswrapper[5039]: E1124 13:19:33.306192 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.336060 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.336130 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.336148 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.336171 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.336188 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:33Z","lastTransitionTime":"2025-11-24T13:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.438940 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.439437 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.439618 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.439758 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.439896 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:33Z","lastTransitionTime":"2025-11-24T13:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.545454 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.545746 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.545826 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.545907 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.545983 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:33Z","lastTransitionTime":"2025-11-24T13:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.648463 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.648539 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.648555 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.648571 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.648582 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:33Z","lastTransitionTime":"2025-11-24T13:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.751269 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.751326 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.751343 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.751366 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.751404 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:33Z","lastTransitionTime":"2025-11-24T13:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.854759 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.854820 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.854838 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.854864 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.854881 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:33Z","lastTransitionTime":"2025-11-24T13:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.958246 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.958291 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.958316 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.958363 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:33 crc kubenswrapper[5039]: I1124 13:19:33.958387 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:33Z","lastTransitionTime":"2025-11-24T13:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.061846 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.061905 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.061925 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.061951 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.061970 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:34Z","lastTransitionTime":"2025-11-24T13:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.165870 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.166265 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.166402 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.166572 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.166736 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:34Z","lastTransitionTime":"2025-11-24T13:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.270374 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.270651 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.270721 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.270790 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.270847 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:34Z","lastTransitionTime":"2025-11-24T13:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.306703 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:34 crc kubenswrapper[5039]: E1124 13:19:34.306993 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.306846 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:34 crc kubenswrapper[5039]: E1124 13:19:34.307186 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.373360 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.373410 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.373460 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.373482 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.373496 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:34Z","lastTransitionTime":"2025-11-24T13:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.475797 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.476095 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.476170 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.476237 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.476296 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:34Z","lastTransitionTime":"2025-11-24T13:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.578664 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.579049 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.579245 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.579434 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.579681 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:34Z","lastTransitionTime":"2025-11-24T13:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.682627 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.682663 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.682674 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.682690 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.682701 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:34Z","lastTransitionTime":"2025-11-24T13:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.785302 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.785334 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.785344 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.785360 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.785375 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:34Z","lastTransitionTime":"2025-11-24T13:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.888559 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.888617 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.888626 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.888640 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.888648 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:34Z","lastTransitionTime":"2025-11-24T13:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.992062 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.992239 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.992256 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.992273 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:34 crc kubenswrapper[5039]: I1124 13:19:34.992286 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:34Z","lastTransitionTime":"2025-11-24T13:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.094835 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.095765 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.095808 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.095845 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.095874 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:35Z","lastTransitionTime":"2025-11-24T13:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.198563 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.198629 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.198648 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.198673 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.198691 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:35Z","lastTransitionTime":"2025-11-24T13:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.302383 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.302447 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.302467 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.302496 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.302579 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:35Z","lastTransitionTime":"2025-11-24T13:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.305741 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:35 crc kubenswrapper[5039]: E1124 13:19:35.305939 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.306093 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:35 crc kubenswrapper[5039]: E1124 13:19:35.306241 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.406120 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.406219 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.406240 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.406265 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.406282 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:35Z","lastTransitionTime":"2025-11-24T13:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.510268 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.510346 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.510360 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.510383 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.510395 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:35Z","lastTransitionTime":"2025-11-24T13:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.614190 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.614241 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.614255 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.614277 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.614293 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:35Z","lastTransitionTime":"2025-11-24T13:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.717230 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.717297 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.717320 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.717352 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.717374 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:35Z","lastTransitionTime":"2025-11-24T13:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.819698 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.819759 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.819776 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.819800 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.819819 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:35Z","lastTransitionTime":"2025-11-24T13:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.922297 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.922366 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.922423 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.922458 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:35 crc kubenswrapper[5039]: I1124 13:19:35.922486 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:35Z","lastTransitionTime":"2025-11-24T13:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.025731 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.025840 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.025866 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.025901 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.025927 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:36Z","lastTransitionTime":"2025-11-24T13:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.128922 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.128991 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.129011 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.129032 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.129049 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:36Z","lastTransitionTime":"2025-11-24T13:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.232724 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.232781 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.232793 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.232812 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.232828 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:36Z","lastTransitionTime":"2025-11-24T13:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.306220 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.306357 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:36 crc kubenswrapper[5039]: E1124 13:19:36.306398 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:36 crc kubenswrapper[5039]: E1124 13:19:36.306698 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.335826 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.335884 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.335905 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.335931 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.335951 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:36Z","lastTransitionTime":"2025-11-24T13:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.438959 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.439351 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.439488 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.439626 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.439713 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:36Z","lastTransitionTime":"2025-11-24T13:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.542804 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.542845 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.542855 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.542873 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.542884 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:36Z","lastTransitionTime":"2025-11-24T13:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.646352 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.646426 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.646439 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.646480 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.646496 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:36Z","lastTransitionTime":"2025-11-24T13:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.749247 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.749333 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.749357 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.749388 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.749406 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:36Z","lastTransitionTime":"2025-11-24T13:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.851208 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.851244 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.851254 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.851268 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.851282 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:36Z","lastTransitionTime":"2025-11-24T13:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.954221 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.954265 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.954273 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.954290 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:36 crc kubenswrapper[5039]: I1124 13:19:36.954299 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:36Z","lastTransitionTime":"2025-11-24T13:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.057967 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.058104 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.058129 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.058157 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.058220 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:37Z","lastTransitionTime":"2025-11-24T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.161748 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.161799 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.161816 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.161836 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.161850 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:37Z","lastTransitionTime":"2025-11-24T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.264941 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.264990 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.265008 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.265033 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.265051 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:37Z","lastTransitionTime":"2025-11-24T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.306258 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:37 crc kubenswrapper[5039]: E1124 13:19:37.306455 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.306748 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:37 crc kubenswrapper[5039]: E1124 13:19:37.306882 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.339897 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.339935 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.339945 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.339962 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.339974 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:37Z","lastTransitionTime":"2025-11-24T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:37 crc kubenswrapper[5039]: E1124 13:19:37.358704 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.363774 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.363828 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.363841 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.363857 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.363869 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:37Z","lastTransitionTime":"2025-11-24T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:37 crc kubenswrapper[5039]: E1124 13:19:37.379075 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.383985 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.384029 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.384043 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.384074 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.384086 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:37Z","lastTransitionTime":"2025-11-24T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:37 crc kubenswrapper[5039]: E1124 13:19:37.396278 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.400214 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.400249 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.400258 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.400272 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.400283 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:37Z","lastTransitionTime":"2025-11-24T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:37 crc kubenswrapper[5039]: E1124 13:19:37.414584 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.419139 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.419229 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.419253 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.419280 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.419301 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:37Z","lastTransitionTime":"2025-11-24T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:37 crc kubenswrapper[5039]: E1124 13:19:37.437815 5039 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4dbca686-4ed9-4a84-88e1-60f728139059\\\",\\\"systemUUID\\\":\\\"afac5e87-a763-4b8d-96ee-10f975a13d9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:37 crc kubenswrapper[5039]: E1124 13:19:37.438035 5039 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.439646 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.439683 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.439694 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.439711 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.439724 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:37Z","lastTransitionTime":"2025-11-24T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.542376 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.542432 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.542444 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.542462 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.542486 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:37Z","lastTransitionTime":"2025-11-24T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.645356 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.645419 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.645438 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.645468 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.645496 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:37Z","lastTransitionTime":"2025-11-24T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.748069 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.748113 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.748126 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.748140 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.748151 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:37Z","lastTransitionTime":"2025-11-24T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.851581 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.851670 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.851696 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.851729 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.851764 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:37Z","lastTransitionTime":"2025-11-24T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.954858 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.954933 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.954955 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.954982 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:37 crc kubenswrapper[5039]: I1124 13:19:37.955001 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:37Z","lastTransitionTime":"2025-11-24T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.057325 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.057371 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.057383 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.057401 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.057413 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:38Z","lastTransitionTime":"2025-11-24T13:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.161486 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.161568 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.161580 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.161602 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.161616 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:38Z","lastTransitionTime":"2025-11-24T13:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.264076 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.264120 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.264143 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.264158 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.264169 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:38Z","lastTransitionTime":"2025-11-24T13:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.306494 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.306531 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:38 crc kubenswrapper[5039]: E1124 13:19:38.307070 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:38 crc kubenswrapper[5039]: E1124 13:19:38.307679 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.325445 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0395a0c-b668-4a2e-bf3b-0cffccfd7fa9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5af3a49c2811916009a13f5ba5b9bfa08f35a81fdc13e1f8425309d2d6274890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a80b13940865b0e89fbc388588f550524bc07a74f9d67260194dba30d2bcd24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25b28008bcc978e76c38e5e69245e4802c6782f3014ba3848a9afa25df7d1819\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7143db056b6682310a3ea184a5b628dd477d25651df0c70a6fb2013427c047c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.344207 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14d3f658-8ca1-4a4c-9de1-652ddeea4ef2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d0a9c652247fbdbbdf922a8acc1d870725ec275fc460014f010116a4bf3bb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c031161c8bff4b43822d11c189bd6751c4ce555f3194ebc43ac37166a0b5e8d4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fd44f37e7ed3646d5b362133f688c24d42853a71896d489375dac2c4767b6c5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://523c00d36dbd5606f2e188eb523bbbbf51e321b57fb14ef94e6badf6bf4482a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42923fe44e5120047ef055b2f36f44036edb37388b8fe624dc8fac7b169fc53e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T13:18:28Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1124 13:18:22.926132 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1124 13:18:22.928700 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3469244204/tls.crt::/tmp/serving-cert-3469244204/tls.key\\\\\\\"\\\\nI1124 13:18:28.660428 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 13:18:28.665649 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 13:18:28.665684 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 13:18:28.665725 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 13:18:28.665737 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 13:18:28.679654 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1124 13:18:28.679695 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1124 13:18:28.679706 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679763 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 13:18:28.679776 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 13:18:28.679788 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 13:18:28.679796 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 13:18:28.679805 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1124 13:18:28.683404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1e33844ad330296a09679d4c088680210af633e1bd4c1d9ab0594027339386\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:18:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65c425d84bb8c73d19a0693399a6695dda39b94ee3ca027a83258ff186f7d2bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T13:18:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.361497 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.366379 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.366487 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.366579 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.366608 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.366625 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:38Z","lastTransitionTime":"2025-11-24T13:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.378802 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:29Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.396596 5039 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kr94g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6c18c830-d513-4df0-be92-cd44f2d2c5df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:18:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T13:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f68c347316af28eef4d9d661fff4ef8497e81704ecbdb6794e54ba842a37e20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T13:19:20Z\\\",\\\"message\\\":\\\"2025-11-24T13:18:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_33ca652c-f1b7-4dbc-aa0e-a5c9a7a2c4bd\\\\n2025-11-24T13:18:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_33ca652c-f1b7-4dbc-aa0e-a5c9a7a2c4bd to /host/opt/cni/bin/\\\\n2025-11-24T13:18:35Z [verbose] multus-daemon started\\\\n2025-11-24T13:18:35Z [verbose] Readiness Indicator file check\\\\n2025-11-24T13:19:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T13:18:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T13:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkv7g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T13:18:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kr94g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.441835 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=36.441805335 podStartE2EDuration="36.441805335s" podCreationTimestamp="2025-11-24 13:19:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:19:38.441772244 +0000 UTC m=+90.880896774" watchObservedRunningTime="2025-11-24 13:19:38.441805335 +0000 UTC m=+90.880929875" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.442098 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-v5nbc" podStartSLOduration=65.442092142 podStartE2EDuration="1m5.442092142s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:19:38.424781736 +0000 UTC m=+90.863906266" watchObservedRunningTime="2025-11-24 13:19:38.442092142 +0000 UTC m=+90.881216662" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.470046 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.470430 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.470446 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.470468 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.470483 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:38Z","lastTransitionTime":"2025-11-24T13:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.515863 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=10.515842555 podStartE2EDuration="10.515842555s" podCreationTimestamp="2025-11-24 13:19:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:19:38.504191685 +0000 UTC m=+90.943316195" watchObservedRunningTime="2025-11-24 13:19:38.515842555 +0000 UTC m=+90.954967065" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.526874 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-k79vj" podStartSLOduration=65.52685386 podStartE2EDuration="1m5.52685386s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:19:38.516231835 +0000 UTC m=+90.955356355" watchObservedRunningTime="2025-11-24 13:19:38.52685386 +0000 UTC m=+90.965978370" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.527220 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podStartSLOduration=65.527213779 podStartE2EDuration="1m5.527213779s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:19:38.527024834 +0000 UTC m=+90.966149354" watchObservedRunningTime="2025-11-24 13:19:38.527213779 +0000 UTC m=+90.966338289" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.572372 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.572417 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.572429 5039 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.572444 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.572456 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:38Z","lastTransitionTime":"2025-11-24T13:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.592651 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-q77mz" podStartSLOduration=65.592636007 podStartE2EDuration="1m5.592636007s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:19:38.591690212 +0000 UTC m=+91.030814732" watchObservedRunningTime="2025-11-24 13:19:38.592636007 +0000 UTC m=+91.031760507" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.603823 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-dsj42" podStartSLOduration=65.603799035 podStartE2EDuration="1m5.603799035s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:19:38.603059865 +0000 UTC m=+91.042184365" watchObservedRunningTime="2025-11-24 13:19:38.603799035 +0000 UTC m=+91.042923555" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.675030 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.675061 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.675071 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.675086 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.675096 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:38Z","lastTransitionTime":"2025-11-24T13:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.778100 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.778130 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.778138 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.778154 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.778163 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:38Z","lastTransitionTime":"2025-11-24T13:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.881728 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.881767 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.881778 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.881796 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.881808 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:38Z","lastTransitionTime":"2025-11-24T13:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.985090 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.985417 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.985704 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.985893 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:38 crc kubenswrapper[5039]: I1124 13:19:38.986019 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:38Z","lastTransitionTime":"2025-11-24T13:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.088225 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.088291 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.088328 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.088350 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.088367 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:39Z","lastTransitionTime":"2025-11-24T13:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.190656 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.190704 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.190713 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.190729 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.190739 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:39Z","lastTransitionTime":"2025-11-24T13:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.293574 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.294051 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.294103 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.294137 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.294159 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:39Z","lastTransitionTime":"2025-11-24T13:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.306427 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.306465 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:39 crc kubenswrapper[5039]: E1124 13:19:39.306625 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:39 crc kubenswrapper[5039]: E1124 13:19:39.306762 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.396905 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.396965 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.396999 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.397029 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.397049 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:39Z","lastTransitionTime":"2025-11-24T13:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.500413 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.500476 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.500563 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.500600 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.500621 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:39Z","lastTransitionTime":"2025-11-24T13:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.603308 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.603372 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.603393 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.603421 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.603443 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:39Z","lastTransitionTime":"2025-11-24T13:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.706107 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.706226 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.706291 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.706324 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.706348 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:39Z","lastTransitionTime":"2025-11-24T13:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.808448 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.808558 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.808583 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.808613 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.808634 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:39Z","lastTransitionTime":"2025-11-24T13:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.911297 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.911450 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.911472 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.911496 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:39 crc kubenswrapper[5039]: I1124 13:19:39.911539 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:39Z","lastTransitionTime":"2025-11-24T13:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.013300 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.013339 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.013348 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.013360 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.013370 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:40Z","lastTransitionTime":"2025-11-24T13:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.115178 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.115415 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.115515 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.115633 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.115728 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:40Z","lastTransitionTime":"2025-11-24T13:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.218435 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.218489 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.218500 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.218537 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.218548 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:40Z","lastTransitionTime":"2025-11-24T13:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.306386 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.306450 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:40 crc kubenswrapper[5039]: E1124 13:19:40.306638 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:40 crc kubenswrapper[5039]: E1124 13:19:40.306761 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.321124 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.321178 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.321194 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.321216 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.321235 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:40Z","lastTransitionTime":"2025-11-24T13:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.423321 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.423374 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.423386 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.423407 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.423419 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:40Z","lastTransitionTime":"2025-11-24T13:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.526867 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.526924 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.526940 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.526963 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.526978 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:40Z","lastTransitionTime":"2025-11-24T13:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.629409 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.629471 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.629487 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.629542 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.629560 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:40Z","lastTransitionTime":"2025-11-24T13:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.732423 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.732470 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.732482 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.732498 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.732554 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:40Z","lastTransitionTime":"2025-11-24T13:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.835581 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.835619 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.835628 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.835647 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.835663 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:40Z","lastTransitionTime":"2025-11-24T13:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.938274 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.938744 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.938871 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.938968 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:40 crc kubenswrapper[5039]: I1124 13:19:40.939070 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:40Z","lastTransitionTime":"2025-11-24T13:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.043291 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.043388 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.043457 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.043492 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.043574 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:41Z","lastTransitionTime":"2025-11-24T13:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.147089 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.147160 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.147200 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.147231 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.147254 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:41Z","lastTransitionTime":"2025-11-24T13:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.249556 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.249613 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.249626 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.249642 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.250021 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:41Z","lastTransitionTime":"2025-11-24T13:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.306356 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.306368 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:41 crc kubenswrapper[5039]: E1124 13:19:41.306774 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:41 crc kubenswrapper[5039]: E1124 13:19:41.306866 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.353300 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.353373 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.353391 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.353415 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.353433 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:41Z","lastTransitionTime":"2025-11-24T13:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.456600 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.456762 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.456789 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.456814 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.456832 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:41Z","lastTransitionTime":"2025-11-24T13:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.559529 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.559561 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.559571 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.559585 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.559593 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:41Z","lastTransitionTime":"2025-11-24T13:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.662400 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.662450 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.662465 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.662485 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.662496 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:41Z","lastTransitionTime":"2025-11-24T13:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.765059 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.765131 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.765143 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.765189 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.765202 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:41Z","lastTransitionTime":"2025-11-24T13:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.866959 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.867001 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.867011 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.867026 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.867035 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:41Z","lastTransitionTime":"2025-11-24T13:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.970094 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.970184 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.970204 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.970224 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:41 crc kubenswrapper[5039]: I1124 13:19:41.970239 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:41Z","lastTransitionTime":"2025-11-24T13:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.072562 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.072625 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.072637 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.072683 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.072697 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:42Z","lastTransitionTime":"2025-11-24T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.175687 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.175725 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.175734 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.175748 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.175759 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:42Z","lastTransitionTime":"2025-11-24T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.278185 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.278258 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.278273 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.278290 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.278303 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:42Z","lastTransitionTime":"2025-11-24T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.323999 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:42 crc kubenswrapper[5039]: E1124 13:19:42.324094 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.324005 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:42 crc kubenswrapper[5039]: E1124 13:19:42.324270 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.381315 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.381367 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.381379 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.381397 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.381408 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:42Z","lastTransitionTime":"2025-11-24T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.484105 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.484156 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.484169 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.484187 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.484198 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:42Z","lastTransitionTime":"2025-11-24T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.586735 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.586787 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.586798 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.586814 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.586825 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:42Z","lastTransitionTime":"2025-11-24T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.688617 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.689033 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.689257 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.689442 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.689650 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:42Z","lastTransitionTime":"2025-11-24T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.792183 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.792226 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.792236 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.792251 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.792262 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:42Z","lastTransitionTime":"2025-11-24T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.895217 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.895261 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.895272 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.895287 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.895298 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:42Z","lastTransitionTime":"2025-11-24T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.997052 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.997099 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.997109 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.997125 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:42 crc kubenswrapper[5039]: I1124 13:19:42.997135 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:42Z","lastTransitionTime":"2025-11-24T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.099977 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.100233 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.100328 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.100404 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.100464 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:43Z","lastTransitionTime":"2025-11-24T13:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.202593 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.202883 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.202986 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.203080 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.203174 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:43Z","lastTransitionTime":"2025-11-24T13:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.305734 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:43 crc kubenswrapper[5039]: E1124 13:19:43.305848 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.305995 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.306035 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.306055 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.306080 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.306102 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:43Z","lastTransitionTime":"2025-11-24T13:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.305734 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:43 crc kubenswrapper[5039]: E1124 13:19:43.306710 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.409376 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.409451 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.409473 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.409567 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.409597 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:43Z","lastTransitionTime":"2025-11-24T13:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.514217 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.514276 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.514297 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.514321 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.514333 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:43Z","lastTransitionTime":"2025-11-24T13:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.618494 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.618584 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.618803 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.618825 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.618838 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:43Z","lastTransitionTime":"2025-11-24T13:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.722139 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.722217 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.722240 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.722272 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.722294 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:43Z","lastTransitionTime":"2025-11-24T13:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.826129 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.826171 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.826181 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.826197 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.826209 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:43Z","lastTransitionTime":"2025-11-24T13:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.929190 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.929264 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.929276 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.929295 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:43 crc kubenswrapper[5039]: I1124 13:19:43.929309 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:43Z","lastTransitionTime":"2025-11-24T13:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.031983 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.032052 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.032078 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.032110 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.032152 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:44Z","lastTransitionTime":"2025-11-24T13:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.135139 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.135198 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.135215 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.135238 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.135254 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:44Z","lastTransitionTime":"2025-11-24T13:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.238042 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.238110 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.238130 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.238158 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.238176 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:44Z","lastTransitionTime":"2025-11-24T13:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.306084 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:44 crc kubenswrapper[5039]: E1124 13:19:44.306359 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.306837 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:44 crc kubenswrapper[5039]: E1124 13:19:44.306985 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.308761 5039 scope.go:117] "RemoveContainer" containerID="217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce" Nov 24 13:19:44 crc kubenswrapper[5039]: E1124 13:19:44.309057 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-w2ctb_openshift-ovn-kubernetes(54c05b03-6747-47bf-a40d-8a9332c4d856)\"" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.330475 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.340766 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.340870 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.340896 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.340926 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.340950 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:44Z","lastTransitionTime":"2025-11-24T13:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.443703 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.443773 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.443786 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.443831 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.443847 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:44Z","lastTransitionTime":"2025-11-24T13:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.547254 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.547299 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.547316 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.547340 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.547356 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:44Z","lastTransitionTime":"2025-11-24T13:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.650670 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.650718 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.650728 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.650742 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.650752 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:44Z","lastTransitionTime":"2025-11-24T13:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.754598 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.755049 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.755176 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.755291 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.755373 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:44Z","lastTransitionTime":"2025-11-24T13:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.858559 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.858892 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.859101 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.859287 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.859491 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:44Z","lastTransitionTime":"2025-11-24T13:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.962662 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.962735 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.962753 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.962777 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:44 crc kubenswrapper[5039]: I1124 13:19:44.962794 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:44Z","lastTransitionTime":"2025-11-24T13:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.066152 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.066260 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.066327 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.066358 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.066381 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:45Z","lastTransitionTime":"2025-11-24T13:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.169563 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.169627 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.169654 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.169685 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.169708 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:45Z","lastTransitionTime":"2025-11-24T13:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.273090 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.273134 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.273151 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.273173 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.273189 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:45Z","lastTransitionTime":"2025-11-24T13:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.306337 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:45 crc kubenswrapper[5039]: E1124 13:19:45.307426 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.306362 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:45 crc kubenswrapper[5039]: E1124 13:19:45.307555 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.377235 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.377311 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.377333 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.377363 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.377386 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:45Z","lastTransitionTime":"2025-11-24T13:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.480627 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.480926 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.481111 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.481359 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.481594 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:45Z","lastTransitionTime":"2025-11-24T13:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.585176 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.585220 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.585237 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.585259 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.585276 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:45Z","lastTransitionTime":"2025-11-24T13:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.687213 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.687274 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.687293 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.687320 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.687338 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:45Z","lastTransitionTime":"2025-11-24T13:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.790112 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.790156 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.790167 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.790184 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.790195 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:45Z","lastTransitionTime":"2025-11-24T13:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.893599 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.893654 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.893668 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.893689 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.893707 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:45Z","lastTransitionTime":"2025-11-24T13:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.997323 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.997446 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.997471 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.997495 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:45 crc kubenswrapper[5039]: I1124 13:19:45.997561 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:45Z","lastTransitionTime":"2025-11-24T13:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.100869 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.100940 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.100973 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.101013 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.101036 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:46Z","lastTransitionTime":"2025-11-24T13:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.203960 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.204038 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.204062 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.204127 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.204154 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:46Z","lastTransitionTime":"2025-11-24T13:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.305736 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.305821 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:46 crc kubenswrapper[5039]: E1124 13:19:46.305961 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:46 crc kubenswrapper[5039]: E1124 13:19:46.306152 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.308005 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.308060 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.308085 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.308115 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.308141 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:46Z","lastTransitionTime":"2025-11-24T13:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.411943 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.412235 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.412424 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.412608 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.412739 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:46Z","lastTransitionTime":"2025-11-24T13:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.515830 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.515888 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.515905 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.515928 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.515945 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:46Z","lastTransitionTime":"2025-11-24T13:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.618716 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.619113 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.619337 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.619592 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.619876 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:46Z","lastTransitionTime":"2025-11-24T13:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.722721 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.722769 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.722782 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.722800 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.722814 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:46Z","lastTransitionTime":"2025-11-24T13:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.825019 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.825065 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.825080 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.825102 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.825118 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:46Z","lastTransitionTime":"2025-11-24T13:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.928281 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.928345 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.928385 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.928417 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:46 crc kubenswrapper[5039]: I1124 13:19:46.928440 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:46Z","lastTransitionTime":"2025-11-24T13:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.031865 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.031989 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.032011 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.032033 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.032051 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:47Z","lastTransitionTime":"2025-11-24T13:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.134763 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.134831 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.134848 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.134870 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.134886 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:47Z","lastTransitionTime":"2025-11-24T13:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.237486 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.237612 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.237643 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.237675 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.237699 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:47Z","lastTransitionTime":"2025-11-24T13:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.305659 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.305711 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:47 crc kubenswrapper[5039]: E1124 13:19:47.305827 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:47 crc kubenswrapper[5039]: E1124 13:19:47.305971 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.340981 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.341015 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.341024 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.341038 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.341048 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:47Z","lastTransitionTime":"2025-11-24T13:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.444005 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.444115 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.444136 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.444162 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.444179 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:47Z","lastTransitionTime":"2025-11-24T13:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.547126 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.547238 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.547256 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.547280 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.547299 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:47Z","lastTransitionTime":"2025-11-24T13:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.651220 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.651290 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.651314 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.651376 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.651396 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:47Z","lastTransitionTime":"2025-11-24T13:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.754434 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.754553 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.754569 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.754592 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.754607 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:47Z","lastTransitionTime":"2025-11-24T13:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.827480 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.827775 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.827866 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.827963 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.828053 5039 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T13:19:47Z","lastTransitionTime":"2025-11-24T13:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.892016 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf"] Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.893260 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.896375 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.896600 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.897379 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.898789 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 24 13:19:47 crc kubenswrapper[5039]: I1124 13:19:47.951959 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-kr94g" podStartSLOduration=74.951932158 podStartE2EDuration="1m14.951932158s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:19:47.951455125 +0000 UTC m=+100.390579635" watchObservedRunningTime="2025-11-24 13:19:47.951932158 +0000 UTC m=+100.391056688" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.002245 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1187783f-deba-4b64-99f1-a9ea92e2863b-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-c5gtf\" (UID: \"1187783f-deba-4b64-99f1-a9ea92e2863b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.002331 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1187783f-deba-4b64-99f1-a9ea92e2863b-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-c5gtf\" (UID: \"1187783f-deba-4b64-99f1-a9ea92e2863b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.002362 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1187783f-deba-4b64-99f1-a9ea92e2863b-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-c5gtf\" (UID: \"1187783f-deba-4b64-99f1-a9ea92e2863b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.002428 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1187783f-deba-4b64-99f1-a9ea92e2863b-service-ca\") pod \"cluster-version-operator-5c965bbfc6-c5gtf\" (UID: \"1187783f-deba-4b64-99f1-a9ea92e2863b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.002461 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1187783f-deba-4b64-99f1-a9ea92e2863b-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-c5gtf\" (UID: \"1187783f-deba-4b64-99f1-a9ea92e2863b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.003578 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=80.003559659 podStartE2EDuration="1m20.003559659s" podCreationTimestamp="2025-11-24 13:18:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:19:47.982750333 +0000 UTC m=+100.421874873" watchObservedRunningTime="2025-11-24 13:19:48.003559659 +0000 UTC m=+100.442684169" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.032906 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=4.032883746 podStartE2EDuration="4.032883746s" podCreationTimestamp="2025-11-24 13:19:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:19:48.03186072 +0000 UTC m=+100.470985230" watchObservedRunningTime="2025-11-24 13:19:48.032883746 +0000 UTC m=+100.472008256" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.033213 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=79.033208045 podStartE2EDuration="1m19.033208045s" podCreationTimestamp="2025-11-24 13:18:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:19:48.002973374 +0000 UTC m=+100.442097934" watchObservedRunningTime="2025-11-24 13:19:48.033208045 +0000 UTC m=+100.472332555" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.103463 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: 
\"kubernetes.io/host-path/1187783f-deba-4b64-99f1-a9ea92e2863b-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-c5gtf\" (UID: \"1187783f-deba-4b64-99f1-a9ea92e2863b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.103501 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1187783f-deba-4b64-99f1-a9ea92e2863b-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-c5gtf\" (UID: \"1187783f-deba-4b64-99f1-a9ea92e2863b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.103573 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1187783f-deba-4b64-99f1-a9ea92e2863b-service-ca\") pod \"cluster-version-operator-5c965bbfc6-c5gtf\" (UID: \"1187783f-deba-4b64-99f1-a9ea92e2863b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.103591 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1187783f-deba-4b64-99f1-a9ea92e2863b-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-c5gtf\" (UID: \"1187783f-deba-4b64-99f1-a9ea92e2863b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.103627 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1187783f-deba-4b64-99f1-a9ea92e2863b-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-c5gtf\" (UID: \"1187783f-deba-4b64-99f1-a9ea92e2863b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.103694 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1187783f-deba-4b64-99f1-a9ea92e2863b-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-c5gtf\" (UID: \"1187783f-deba-4b64-99f1-a9ea92e2863b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.103782 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1187783f-deba-4b64-99f1-a9ea92e2863b-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-c5gtf\" (UID: \"1187783f-deba-4b64-99f1-a9ea92e2863b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.104560 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1187783f-deba-4b64-99f1-a9ea92e2863b-service-ca\") pod \"cluster-version-operator-5c965bbfc6-c5gtf\" (UID: \"1187783f-deba-4b64-99f1-a9ea92e2863b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.111636 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1187783f-deba-4b64-99f1-a9ea92e2863b-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-c5gtf\" (UID: 
\"1187783f-deba-4b64-99f1-a9ea92e2863b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.123790 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1187783f-deba-4b64-99f1-a9ea92e2863b-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-c5gtf\" (UID: \"1187783f-deba-4b64-99f1-a9ea92e2863b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.222154 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.305936 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.305981 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:48 crc kubenswrapper[5039]: E1124 13:19:48.307301 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:48 crc kubenswrapper[5039]: E1124 13:19:48.307471 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.826055 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf" event={"ID":"1187783f-deba-4b64-99f1-a9ea92e2863b","Type":"ContainerStarted","Data":"83c599caa41de04ffb765cccb5753b9a8a472facaae2dfd6f5e788e8d8016070"} Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.826120 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf" event={"ID":"1187783f-deba-4b64-99f1-a9ea92e2863b","Type":"ContainerStarted","Data":"d5f54f8ceec3000da77017aaba9484128f430a4c665fd535cd233eace673cb38"} Nov 24 13:19:48 crc kubenswrapper[5039]: I1124 13:19:48.846613 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c5gtf" podStartSLOduration=75.84657913 podStartE2EDuration="1m15.84657913s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:19:48.845093262 +0000 UTC m=+101.284217802" watchObservedRunningTime="2025-11-24 13:19:48.84657913 +0000 UTC m=+101.285703710" Nov 24 13:19:49 crc kubenswrapper[5039]: I1124 13:19:49.305928 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:49 crc kubenswrapper[5039]: I1124 13:19:49.306026 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:49 crc kubenswrapper[5039]: E1124 13:19:49.306136 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:49 crc kubenswrapper[5039]: E1124 13:19:49.306392 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:50 crc kubenswrapper[5039]: I1124 13:19:50.305745 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:50 crc kubenswrapper[5039]: I1124 13:19:50.305831 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:50 crc kubenswrapper[5039]: E1124 13:19:50.305951 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:50 crc kubenswrapper[5039]: E1124 13:19:50.306243 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:51 crc kubenswrapper[5039]: I1124 13:19:51.306576 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:51 crc kubenswrapper[5039]: I1124 13:19:51.306652 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:51 crc kubenswrapper[5039]: E1124 13:19:51.306727 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:51 crc kubenswrapper[5039]: E1124 13:19:51.306828 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:52 crc kubenswrapper[5039]: I1124 13:19:52.061951 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs\") pod \"network-metrics-daemon-vnpwt\" (UID: \"5926107d-81bc-4e34-9e27-8018cbccf590\") " pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:52 crc kubenswrapper[5039]: E1124 13:19:52.062237 5039 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 13:19:52 crc kubenswrapper[5039]: E1124 13:19:52.062385 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs podName:5926107d-81bc-4e34-9e27-8018cbccf590 nodeName:}" failed. No retries permitted until 2025-11-24 13:20:56.062349971 +0000 UTC m=+168.501474521 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs") pod "network-metrics-daemon-vnpwt" (UID: "5926107d-81bc-4e34-9e27-8018cbccf590") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 13:19:52 crc kubenswrapper[5039]: I1124 13:19:52.306063 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:52 crc kubenswrapper[5039]: I1124 13:19:52.306157 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:52 crc kubenswrapper[5039]: E1124 13:19:52.306247 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:52 crc kubenswrapper[5039]: E1124 13:19:52.306349 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:53 crc kubenswrapper[5039]: I1124 13:19:53.306402 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:53 crc kubenswrapper[5039]: I1124 13:19:53.306439 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:53 crc kubenswrapper[5039]: E1124 13:19:53.306867 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:53 crc kubenswrapper[5039]: E1124 13:19:53.306750 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:54 crc kubenswrapper[5039]: I1124 13:19:54.306108 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:54 crc kubenswrapper[5039]: I1124 13:19:54.306123 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:54 crc kubenswrapper[5039]: E1124 13:19:54.306285 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:54 crc kubenswrapper[5039]: E1124 13:19:54.306388 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:55 crc kubenswrapper[5039]: I1124 13:19:55.306058 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:55 crc kubenswrapper[5039]: E1124 13:19:55.306404 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:55 crc kubenswrapper[5039]: I1124 13:19:55.306419 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:55 crc kubenswrapper[5039]: E1124 13:19:55.306685 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:56 crc kubenswrapper[5039]: I1124 13:19:56.306224 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:56 crc kubenswrapper[5039]: I1124 13:19:56.306351 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:56 crc kubenswrapper[5039]: E1124 13:19:56.306544 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:56 crc kubenswrapper[5039]: E1124 13:19:56.306690 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:56 crc kubenswrapper[5039]: I1124 13:19:56.308003 5039 scope.go:117] "RemoveContainer" containerID="217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce" Nov 24 13:19:56 crc kubenswrapper[5039]: E1124 13:19:56.308276 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-w2ctb_openshift-ovn-kubernetes(54c05b03-6747-47bf-a40d-8a9332c4d856)\"" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" Nov 24 13:19:57 crc kubenswrapper[5039]: I1124 13:19:57.305951 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:57 crc kubenswrapper[5039]: I1124 13:19:57.306454 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:57 crc kubenswrapper[5039]: E1124 13:19:57.306676 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:57 crc kubenswrapper[5039]: E1124 13:19:57.307687 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:19:58 crc kubenswrapper[5039]: I1124 13:19:58.306328 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:19:58 crc kubenswrapper[5039]: I1124 13:19:58.306740 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:19:58 crc kubenswrapper[5039]: E1124 13:19:58.311167 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:19:58 crc kubenswrapper[5039]: E1124 13:19:58.311962 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:19:59 crc kubenswrapper[5039]: I1124 13:19:59.306443 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:19:59 crc kubenswrapper[5039]: I1124 13:19:59.306498 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:19:59 crc kubenswrapper[5039]: E1124 13:19:59.306734 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:19:59 crc kubenswrapper[5039]: E1124 13:19:59.306874 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:20:00 crc kubenswrapper[5039]: I1124 13:20:00.306001 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:20:00 crc kubenswrapper[5039]: I1124 13:20:00.306041 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:20:00 crc kubenswrapper[5039]: E1124 13:20:00.306288 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:20:00 crc kubenswrapper[5039]: E1124 13:20:00.306701 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:20:01 crc kubenswrapper[5039]: I1124 13:20:01.306204 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:20:01 crc kubenswrapper[5039]: I1124 13:20:01.306683 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:20:01 crc kubenswrapper[5039]: E1124 13:20:01.306870 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:20:01 crc kubenswrapper[5039]: E1124 13:20:01.307144 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:20:02 crc kubenswrapper[5039]: I1124 13:20:02.305749 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:20:02 crc kubenswrapper[5039]: I1124 13:20:02.305803 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:20:02 crc kubenswrapper[5039]: E1124 13:20:02.305980 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:20:02 crc kubenswrapper[5039]: E1124 13:20:02.306181 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:20:03 crc kubenswrapper[5039]: I1124 13:20:03.306115 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:20:03 crc kubenswrapper[5039]: E1124 13:20:03.306522 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:20:03 crc kubenswrapper[5039]: I1124 13:20:03.306163 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:20:03 crc kubenswrapper[5039]: E1124 13:20:03.306679 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:20:04 crc kubenswrapper[5039]: I1124 13:20:04.306101 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:20:04 crc kubenswrapper[5039]: E1124 13:20:04.306581 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:20:04 crc kubenswrapper[5039]: I1124 13:20:04.306102 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:20:04 crc kubenswrapper[5039]: E1124 13:20:04.307493 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:20:05 crc kubenswrapper[5039]: I1124 13:20:05.306046 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:20:05 crc kubenswrapper[5039]: I1124 13:20:05.306113 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:20:05 crc kubenswrapper[5039]: E1124 13:20:05.306178 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:20:05 crc kubenswrapper[5039]: E1124 13:20:05.306299 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:20:06 crc kubenswrapper[5039]: I1124 13:20:06.305780 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:20:06 crc kubenswrapper[5039]: I1124 13:20:06.305904 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:20:06 crc kubenswrapper[5039]: E1124 13:20:06.305972 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:20:06 crc kubenswrapper[5039]: E1124 13:20:06.306588 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:20:07 crc kubenswrapper[5039]: I1124 13:20:07.306777 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:20:07 crc kubenswrapper[5039]: I1124 13:20:07.306876 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:20:07 crc kubenswrapper[5039]: E1124 13:20:07.306938 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:20:07 crc kubenswrapper[5039]: E1124 13:20:07.307547 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:20:07 crc kubenswrapper[5039]: I1124 13:20:07.308154 5039 scope.go:117] "RemoveContainer" containerID="217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce" Nov 24 13:20:07 crc kubenswrapper[5039]: E1124 13:20:07.308447 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-w2ctb_openshift-ovn-kubernetes(54c05b03-6747-47bf-a40d-8a9332c4d856)\"" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" Nov 24 13:20:07 crc kubenswrapper[5039]: I1124 13:20:07.888162 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kr94g_6c18c830-d513-4df0-be92-cd44f2d2c5df/kube-multus/1.log" Nov 24 13:20:07 crc kubenswrapper[5039]: I1124 13:20:07.888964 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kr94g_6c18c830-d513-4df0-be92-cd44f2d2c5df/kube-multus/0.log" Nov 24 13:20:07 crc kubenswrapper[5039]: I1124 13:20:07.889048 5039 generic.go:334] "Generic (PLEG): container finished" podID="6c18c830-d513-4df0-be92-cd44f2d2c5df" containerID="8f68c347316af28eef4d9d661fff4ef8497e81704ecbdb6794e54ba842a37e20" exitCode=1 Nov 24 13:20:07 crc kubenswrapper[5039]: I1124 13:20:07.889100 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kr94g" event={"ID":"6c18c830-d513-4df0-be92-cd44f2d2c5df","Type":"ContainerDied","Data":"8f68c347316af28eef4d9d661fff4ef8497e81704ecbdb6794e54ba842a37e20"} Nov 24 13:20:07 crc kubenswrapper[5039]: I1124 13:20:07.889159 5039 scope.go:117] "RemoveContainer" containerID="a352ed0dc7ee4fb0e53c97e831c1ad43eb518b5c05d6a772b3d438ba908743c9" Nov 24 13:20:07 crc kubenswrapper[5039]: I1124 13:20:07.889789 5039 scope.go:117] "RemoveContainer" containerID="8f68c347316af28eef4d9d661fff4ef8497e81704ecbdb6794e54ba842a37e20" Nov 24 13:20:07 crc kubenswrapper[5039]: E1124 13:20:07.890087 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-kr94g_openshift-multus(6c18c830-d513-4df0-be92-cd44f2d2c5df)\"" pod="openshift-multus/multus-kr94g" podUID="6c18c830-d513-4df0-be92-cd44f2d2c5df" Nov 24 13:20:08 crc kubenswrapper[5039]: E1124 13:20:08.303879 5039 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 24 13:20:08 crc kubenswrapper[5039]: I1124 13:20:08.306371 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:20:08 crc kubenswrapper[5039]: E1124 13:20:08.307912 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:20:08 crc kubenswrapper[5039]: I1124 13:20:08.308007 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:20:08 crc kubenswrapper[5039]: E1124 13:20:08.308187 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:20:08 crc kubenswrapper[5039]: E1124 13:20:08.691029 5039 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 13:20:08 crc kubenswrapper[5039]: I1124 13:20:08.895468 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kr94g_6c18c830-d513-4df0-be92-cd44f2d2c5df/kube-multus/1.log" Nov 24 13:20:09 crc kubenswrapper[5039]: I1124 13:20:09.305883 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:20:09 crc kubenswrapper[5039]: E1124 13:20:09.306354 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:20:09 crc kubenswrapper[5039]: I1124 13:20:09.305907 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:20:09 crc kubenswrapper[5039]: E1124 13:20:09.307137 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:20:10 crc kubenswrapper[5039]: I1124 13:20:10.306411 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:20:10 crc kubenswrapper[5039]: E1124 13:20:10.307935 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:20:10 crc kubenswrapper[5039]: I1124 13:20:10.306428 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:20:10 crc kubenswrapper[5039]: E1124 13:20:10.308447 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:20:11 crc kubenswrapper[5039]: I1124 13:20:11.305800 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:20:11 crc kubenswrapper[5039]: E1124 13:20:11.306307 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:20:11 crc kubenswrapper[5039]: I1124 13:20:11.306573 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:20:11 crc kubenswrapper[5039]: E1124 13:20:11.307410 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:20:12 crc kubenswrapper[5039]: I1124 13:20:12.306099 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:20:12 crc kubenswrapper[5039]: E1124 13:20:12.306215 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:20:12 crc kubenswrapper[5039]: I1124 13:20:12.306379 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:20:12 crc kubenswrapper[5039]: E1124 13:20:12.306426 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:20:13 crc kubenswrapper[5039]: I1124 13:20:13.306238 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:20:13 crc kubenswrapper[5039]: I1124 13:20:13.306238 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:20:13 crc kubenswrapper[5039]: E1124 13:20:13.306474 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:20:13 crc kubenswrapper[5039]: E1124 13:20:13.306551 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:20:13 crc kubenswrapper[5039]: E1124 13:20:13.693167 5039 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 13:20:14 crc kubenswrapper[5039]: I1124 13:20:14.306306 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:20:14 crc kubenswrapper[5039]: I1124 13:20:14.306355 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:20:14 crc kubenswrapper[5039]: E1124 13:20:14.306643 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:20:14 crc kubenswrapper[5039]: E1124 13:20:14.306725 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:20:15 crc kubenswrapper[5039]: I1124 13:20:15.306678 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:20:15 crc kubenswrapper[5039]: I1124 13:20:15.306738 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:20:15 crc kubenswrapper[5039]: E1124 13:20:15.306813 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:20:15 crc kubenswrapper[5039]: E1124 13:20:15.306885 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:20:16 crc kubenswrapper[5039]: I1124 13:20:16.306702 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:20:16 crc kubenswrapper[5039]: I1124 13:20:16.306735 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:20:16 crc kubenswrapper[5039]: E1124 13:20:16.307876 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:20:16 crc kubenswrapper[5039]: E1124 13:20:16.307894 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:20:17 crc kubenswrapper[5039]: I1124 13:20:17.306646 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:20:17 crc kubenswrapper[5039]: I1124 13:20:17.306646 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 13:20:17 crc kubenswrapper[5039]: E1124 13:20:17.307057 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 13:20:17 crc kubenswrapper[5039]: E1124 13:20:17.306896 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 13:20:18 crc kubenswrapper[5039]: I1124 13:20:18.306593 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:20:18 crc kubenswrapper[5039]: I1124 13:20:18.306613 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 13:20:18 crc kubenswrapper[5039]: E1124 13:20:18.307578 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590" Nov 24 13:20:18 crc kubenswrapper[5039]: E1124 13:20:18.307869 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 13:20:18 crc kubenswrapper[5039]: I1124 13:20:18.310352 5039 scope.go:117] "RemoveContainer" containerID="217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce" Nov 24 13:20:18 crc kubenswrapper[5039]: E1124 13:20:18.693683 5039 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Nov 24 13:20:18 crc kubenswrapper[5039]: I1124 13:20:18.967054 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w2ctb_54c05b03-6747-47bf-a40d-8a9332c4d856/ovnkube-controller/3.log"
Nov 24 13:20:18 crc kubenswrapper[5039]: I1124 13:20:18.969830 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerStarted","Data":"99c5b5e79f97b4d7884d6b8074b7800ee508ef96b0b846ef637fa476acee40bf"}
Nov 24 13:20:18 crc kubenswrapper[5039]: I1124 13:20:18.970197 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb"
Nov 24 13:20:18 crc kubenswrapper[5039]: I1124 13:20:18.993488 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" podStartSLOduration=105.993473084 podStartE2EDuration="1m45.993473084s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:18.992869498 +0000 UTC m=+131.431993998" watchObservedRunningTime="2025-11-24 13:20:18.993473084 +0000 UTC m=+131.432597584"
Nov 24 13:20:19 crc kubenswrapper[5039]: I1124 13:20:19.295336 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-vnpwt"]
Nov 24 13:20:19 crc kubenswrapper[5039]: I1124 13:20:19.295454 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt"
Nov 24 13:20:19 crc kubenswrapper[5039]: E1124 13:20:19.295574 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590"
Nov 24 13:20:19 crc kubenswrapper[5039]: I1124 13:20:19.306149 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 13:20:19 crc kubenswrapper[5039]: E1124 13:20:19.306273 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 13:20:19 crc kubenswrapper[5039]: I1124 13:20:19.306340 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 13:20:19 crc kubenswrapper[5039]: E1124 13:20:19.306464 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 13:20:20 crc kubenswrapper[5039]: I1124 13:20:20.306278 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 13:20:20 crc kubenswrapper[5039]: E1124 13:20:20.308399 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 13:20:21 crc kubenswrapper[5039]: I1124 13:20:21.306799 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 13:20:21 crc kubenswrapper[5039]: I1124 13:20:21.306905 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 13:20:21 crc kubenswrapper[5039]: E1124 13:20:21.307211 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 13:20:21 crc kubenswrapper[5039]: E1124 13:20:21.307412 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 13:20:21 crc kubenswrapper[5039]: I1124 13:20:21.307739 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt"
Nov 24 13:20:21 crc kubenswrapper[5039]: E1124 13:20:21.308006 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590"
Nov 24 13:20:21 crc kubenswrapper[5039]: I1124 13:20:21.308048 5039 scope.go:117] "RemoveContainer" containerID="8f68c347316af28eef4d9d661fff4ef8497e81704ecbdb6794e54ba842a37e20"
Nov 24 13:20:21 crc kubenswrapper[5039]: I1124 13:20:21.984810 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kr94g_6c18c830-d513-4df0-be92-cd44f2d2c5df/kube-multus/1.log"
Nov 24 13:20:21 crc kubenswrapper[5039]: I1124 13:20:21.985220 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kr94g" event={"ID":"6c18c830-d513-4df0-be92-cd44f2d2c5df","Type":"ContainerStarted","Data":"afbc25e2b688679dbfe2c40bce4636e6482ec1605d671d0aa8e10a779e2f545a"}
Nov 24 13:20:22 crc kubenswrapper[5039]: I1124 13:20:22.306212 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 13:20:22 crc kubenswrapper[5039]: E1124 13:20:22.306436 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 13:20:23 crc kubenswrapper[5039]: I1124 13:20:23.306594 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 13:20:23 crc kubenswrapper[5039]: I1124 13:20:23.306640 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt"
Nov 24 13:20:23 crc kubenswrapper[5039]: E1124 13:20:23.306732 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 13:20:23 crc kubenswrapper[5039]: I1124 13:20:23.306783 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 13:20:23 crc kubenswrapper[5039]: E1124 13:20:23.306845 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vnpwt" podUID="5926107d-81bc-4e34-9e27-8018cbccf590"
Nov 24 13:20:23 crc kubenswrapper[5039]: E1124 13:20:23.307102 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 13:20:24 crc kubenswrapper[5039]: I1124 13:20:24.306723 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 13:20:24 crc kubenswrapper[5039]: I1124 13:20:24.311228 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Nov 24 13:20:24 crc kubenswrapper[5039]: I1124 13:20:24.312477 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Nov 24 13:20:25 crc kubenswrapper[5039]: I1124 13:20:25.305864 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 13:20:25 crc kubenswrapper[5039]: I1124 13:20:25.305864 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt"
Nov 24 13:20:25 crc kubenswrapper[5039]: I1124 13:20:25.306039 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 13:20:25 crc kubenswrapper[5039]: I1124 13:20:25.309123 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Nov 24 13:20:25 crc kubenswrapper[5039]: I1124 13:20:25.309713 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Nov 24 13:20:25 crc kubenswrapper[5039]: I1124 13:20:25.310667 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Nov 24 13:20:25 crc kubenswrapper[5039]: I1124 13:20:25.317675 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.345784 5039 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.395606 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-djx5v"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.396435 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-djx5v"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.400484 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.400563 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.404080 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-56bpb"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.405177 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-56bpb"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.405332 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.405370 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.405430 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.406010 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mx8zv"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.406699 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.406912 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mx8zv"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.408058 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gj47k"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.408420 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gj47k"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.411959 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.412316 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-n2dwh"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.412621 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.412911 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.413279 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.413585 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-n2dwh"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.417949 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-9jvgw"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.419008 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5dflw"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.419239 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-9jvgw"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.420021 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-c886j"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.420355 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-c886j"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.421885 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5dflw"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.422803 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-98dp9"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.423085 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-98dp9"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.423723 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.424250 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.424460 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-zqgfl"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.424807 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.424909 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.424977 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.425028 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.435243 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.425080 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.425156 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.425209 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.425254 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.425465 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.425537 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.425731 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.435683 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xgtj9"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.435678 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-zqgfl"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.425733 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.425804 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.425803 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.435760 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.425857 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.425893 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.425983 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.425999 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.426036 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.426079 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.426084 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.426214 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.426252 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.426263 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.426289 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.426278 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.426313 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.426425 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.436704 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-blxxm"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.436896 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.437228 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-b8b2f"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.426746 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.437702 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-k4227"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.437828 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.427862 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.427937 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.428059 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.428207 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.430991 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.431065 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.432088 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.453825 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.455414 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.456290 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.457099 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.462701 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-blxxm"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.463015 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.476828 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.478467 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.481142 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2swfx"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.480341 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.480389 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.481264 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.482007 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.482090 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.484530 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.484544 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.484638 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.485730 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.486716 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.486802 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.486842 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.486869 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.486947 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.487001 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.487028 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.487056 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.487191 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.487742 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.487797 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.481234 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-k4227"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.495609 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.495669 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.496090 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.496581 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.496617 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.496709 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.496931 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.497421 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.497626 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.497836 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.497921 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.497958 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.498038 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.498101 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.498153 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.498383 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.499049 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.499590 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-4hh9n"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.499921 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.499966 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gj47k"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.499997 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-n2dwh"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.500010 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-t958r"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.500323 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jbfln"]
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.501732 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.502333 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.502802 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-4hh9n"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.504557 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510180 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6decbe9-edda-413b-b067-665ccf6efece-config\") pod \"route-controller-manager-6576b87f9c-lv5c7\" (UID: \"a6decbe9-edda-413b-b067-665ccf6efece\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510231 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/bc7f5002-5906-428a-bb9e-c3507cc151c8-etcd-serving-ca\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510258 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/bc7f5002-5906-428a-bb9e-c3507cc151c8-image-import-ca\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510280 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c4c9ff6e-a421-43d0-ac49-c398640d3677-serving-cert\") pod \"authentication-operator-69f744f599-djx5v\" (UID: \"c4c9ff6e-a421-43d0-ac49-c398640d3677\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djx5v"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510303 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a9b8d64c-e7f3-4751-865c-c162aab7badd-serving-cert\") pod \"console-operator-58897d9998-c886j\" (UID: \"a9b8d64c-e7f3-4751-865c-c162aab7badd\") " pod="openshift-console-operator/console-operator-58897d9998-c886j"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510321 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/e04521cd-f63c-40ba-a296-a34d7ce739d7-machine-approver-tls\") pod \"machine-approver-56656f9798-56bpb\" (UID: \"e04521cd-f63c-40ba-a296-a34d7ce739d7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-56bpb"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510335 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e04521cd-f63c-40ba-a296-a34d7ce739d7-config\") pod \"machine-approver-56656f9798-56bpb\" (UID: \"e04521cd-f63c-40ba-a296-a34d7ce739d7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-56bpb"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510356 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a6decbe9-edda-413b-b067-665ccf6efece-client-ca\") pod \"route-controller-manager-6576b87f9c-lv5c7\" (UID: \"a6decbe9-edda-413b-b067-665ccf6efece\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510372 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9b8d64c-e7f3-4751-865c-c162aab7badd-config\") pod \"console-operator-58897d9998-c886j\" (UID: \"a9b8d64c-e7f3-4751-865c-c162aab7badd\") " pod="openshift-console-operator/console-operator-58897d9998-c886j"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510396 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/95afc2a3-7cfb-4a24-b555-5e8c0d21e044-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-5dflw\" (UID: \"95afc2a3-7cfb-4a24-b555-5e8c0d21e044\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5dflw"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510415 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a6decbe9-edda-413b-b067-665ccf6efece-serving-cert\") pod \"route-controller-manager-6576b87f9c-lv5c7\" (UID: \"a6decbe9-edda-413b-b067-665ccf6efece\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510432 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bc7f5002-5906-428a-bb9e-c3507cc151c8-etcd-client\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510466 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/021285de-f14a-481e-986e-8d07616865a2-config\") pod \"openshift-apiserver-operator-796bbdcf4f-gj47k\" (UID: \"021285de-f14a-481e-986e-8d07616865a2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gj47k"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510494 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc7f5002-5906-428a-bb9e-c3507cc151c8-serving-cert\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510530 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-425dx\" (UniqueName: \"kubernetes.io/projected/f1d33349-6b41-43a0-9aa9-03084435fd75-kube-api-access-425dx\") pod \"cluster-samples-operator-665b6dd947-mx8zv\" (UID: \"f1d33349-6b41-43a0-9aa9-03084435fd75\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mx8zv"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510551 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/95afc2a3-7cfb-4a24-b555-5e8c0d21e044-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-5dflw\" (UID: \"95afc2a3-7cfb-4a24-b555-5e8c0d21e044\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5dflw"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510570 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtjb7\" (UniqueName: \"kubernetes.io/projected/bc7f5002-5906-428a-bb9e-c3507cc151c8-kube-api-access-dtjb7\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510587 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vtfxm\" (UniqueName: \"kubernetes.io/projected/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-kube-api-access-vtfxm\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510623 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c4c9ff6e-a421-43d0-ac49-c398640d3677-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-djx5v\" (UID: \"c4c9ff6e-a421-43d0-ac49-c398640d3677\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djx5v"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510672 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4gph\" (UniqueName: \"kubernetes.io/projected/e04521cd-f63c-40ba-a296-a34d7ce739d7-kube-api-access-v4gph\") pod \"machine-approver-56656f9798-56bpb\" (UID: \"e04521cd-f63c-40ba-a296-a34d7ce739d7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-56bpb"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510712 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c4c9ff6e-a421-43d0-ac49-c398640d3677-service-ca-bundle\") pod \"authentication-operator-69f744f599-djx5v\" (UID: \"c4c9ff6e-a421-43d0-ac49-c398640d3677\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djx5v"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510740 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4c9ff6e-a421-43d0-ac49-c398640d3677-config\") pod \"authentication-operator-69f744f599-djx5v\" (UID: \"c4c9ff6e-a421-43d0-ac49-c398640d3677\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djx5v"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510758 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f1d33349-6b41-43a0-9aa9-03084435fd75-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-mx8zv\" (UID: \"f1d33349-6b41-43a0-9aa9-03084435fd75\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mx8zv"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510774 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-encryption-config\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510791 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2flbf\" (UniqueName: \"kubernetes.io/projected/a6decbe9-edda-413b-b067-665ccf6efece-kube-api-access-2flbf\") pod \"route-controller-manager-6576b87f9c-lv5c7\" (UID: \"a6decbe9-edda-413b-b067-665ccf6efece\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510809 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510829 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/bc7f5002-5906-428a-bb9e-c3507cc151c8-node-pullsecrets\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510847 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/021285de-f14a-481e-986e-8d07616865a2-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-gj47k\" (UID: \"021285de-f14a-481e-986e-8d07616865a2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gj47k"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510867 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/bc7f5002-5906-428a-bb9e-c3507cc151c8-audit-dir\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510888 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4rsd\" (UniqueName: \"kubernetes.io/projected/c7032d1d-5aae-4e50-b10f-3df40a0cd983-kube-api-access-d4rsd\") pod \"downloads-7954f5f757-98dp9\" (UID: \"c7032d1d-5aae-4e50-b10f-3df40a0cd983\") " pod="openshift-console/downloads-7954f5f757-98dp9"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510907 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/ae5ca663-7edb-49dd-a7a7-668eeace13f7-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-n2dwh\" (UID: \"ae5ca663-7edb-49dd-a7a7-668eeace13f7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-n2dwh"
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510924 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a9b8d64c-e7f3-4751-865c-c162aab7badd-trusted-ca\") pod
\"console-operator-58897d9998-c886j\" (UID: \"a9b8d64c-e7f3-4751-865c-c162aab7badd\") " pod="openshift-console-operator/console-operator-58897d9998-c886j" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510941 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n5xgs\" (UniqueName: \"kubernetes.io/projected/95afc2a3-7cfb-4a24-b555-5e8c0d21e044-kube-api-access-n5xgs\") pod \"cluster-image-registry-operator-dc59b4c8b-5dflw\" (UID: \"95afc2a3-7cfb-4a24-b555-5e8c0d21e044\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5dflw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510960 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlg79\" (UniqueName: \"kubernetes.io/projected/c4c9ff6e-a421-43d0-ac49-c398640d3677-kube-api-access-nlg79\") pod \"authentication-operator-69f744f599-djx5v\" (UID: \"c4c9ff6e-a421-43d0-ac49-c398640d3677\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djx5v" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510978 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc7f5002-5906-428a-bb9e-c3507cc151c8-config\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.510995 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.511015 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/bc7f5002-5906-428a-bb9e-c3507cc151c8-encryption-config\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.511033 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnwxs\" (UniqueName: \"kubernetes.io/projected/a9b8d64c-e7f3-4751-865c-c162aab7badd-kube-api-access-qnwxs\") pod \"console-operator-58897d9998-c886j\" (UID: \"a9b8d64c-e7f3-4751-865c-c162aab7badd\") " pod="openshift-console-operator/console-operator-58897d9998-c886j" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.511051 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae5ca663-7edb-49dd-a7a7-668eeace13f7-config\") pod \"machine-api-operator-5694c8668f-n2dwh\" (UID: \"ae5ca663-7edb-49dd-a7a7-668eeace13f7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-n2dwh" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.511069 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/95afc2a3-7cfb-4a24-b555-5e8c0d21e044-image-registry-operator-tls\") pod 
\"cluster-image-registry-operator-dc59b4c8b-5dflw\" (UID: \"95afc2a3-7cfb-4a24-b555-5e8c0d21e044\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5dflw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.511085 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-audit-policies\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.511102 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-etcd-client\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.511122 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v46xk\" (UniqueName: \"kubernetes.io/projected/021285de-f14a-481e-986e-8d07616865a2-kube-api-access-v46xk\") pod \"openshift-apiserver-operator-796bbdcf4f-gj47k\" (UID: \"021285de-f14a-481e-986e-8d07616865a2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gj47k" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.511137 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/bc7f5002-5906-428a-bb9e-c3507cc151c8-audit\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.511154 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-serving-cert\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.511171 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-audit-dir\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.511187 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/ae5ca663-7edb-49dd-a7a7-668eeace13f7-images\") pod \"machine-api-operator-5694c8668f-n2dwh\" (UID: \"ae5ca663-7edb-49dd-a7a7-668eeace13f7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-n2dwh" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.511201 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bc7f5002-5906-428a-bb9e-c3507cc151c8-trusted-ca-bundle\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 
13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.511216 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e04521cd-f63c-40ba-a296-a34d7ce739d7-auth-proxy-config\") pod \"machine-approver-56656f9798-56bpb\" (UID: \"e04521cd-f63c-40ba-a296-a34d7ce739d7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-56bpb" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.511235 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbfxg\" (UniqueName: \"kubernetes.io/projected/ae5ca663-7edb-49dd-a7a7-668eeace13f7-kube-api-access-sbfxg\") pod \"machine-api-operator-5694c8668f-n2dwh\" (UID: \"ae5ca663-7edb-49dd-a7a7-668eeace13f7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-n2dwh" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.512728 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-t958r" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.513253 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jbfln" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.519806 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.523833 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.524422 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.524964 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.525206 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.525655 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.525726 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.525673 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.528165 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.529061 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.533865 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-zf42k"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.534916 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-zf42k" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.552746 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.556178 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-2l986"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.560467 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.580377 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.580887 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.581452 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.582235 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.583747 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-6kn6v"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.584308 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2l986" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.584346 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gkvs9"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.584439 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6kn6v" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.585273 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-kln9b"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.585714 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.585982 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gkvs9" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.586318 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xgtj9"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.587014 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.587694 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.589484 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mx8zv"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.590567 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-98dp9"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.591174 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.589693 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.592557 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-c886j"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.593921 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.595086 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5dflw"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.595744 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-k884h"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.596626 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-k884h" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.597562 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.598493 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.598979 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.599207 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8t5gh"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.599433 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.599809 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8t5gh" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.600674 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-blxxm"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.602450 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.603166 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-2l986"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.603275 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.604076 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-9jvgw"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.609529 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2swfx"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.610654 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-md284"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.611164 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-md284" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.611798 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6e075f32-2803-4a2e-bf1a-0b1858adabf0-metrics-tls\") pod \"ingress-operator-5b745b69d9-9dq5z\" (UID: \"6e075f32-2803-4a2e-bf1a-0b1858adabf0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.611825 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.611845 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0883a675-24bc-4d9b-b318-feee05e49135-client-ca\") pod \"controller-manager-879f6c89f-xgtj9\" (UID: \"0883a675-24bc-4d9b-b318-feee05e49135\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.611860 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/25637e2c-a1e3-4449-a549-7b081d0c4c4c-metrics-certs\") pod \"router-default-5444994796-4hh9n\" (UID: \"25637e2c-a1e3-4449-a549-7b081d0c4c4c\") " pod="openshift-ingress/router-default-5444994796-4hh9n" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.611886 5039 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.611902 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/25637e2c-a1e3-4449-a549-7b081d0c4c4c-service-ca-bundle\") pod \"router-default-5444994796-4hh9n\" (UID: \"25637e2c-a1e3-4449-a549-7b081d0c4c4c\") " pod="openshift-ingress/router-default-5444994796-4hh9n" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.611916 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6e075f32-2803-4a2e-bf1a-0b1858adabf0-trusted-ca\") pod \"ingress-operator-5b745b69d9-9dq5z\" (UID: \"6e075f32-2803-4a2e-bf1a-0b1858adabf0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.611932 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0883a675-24bc-4d9b-b318-feee05e49135-config\") pod \"controller-manager-879f6c89f-xgtj9\" (UID: \"0883a675-24bc-4d9b-b318-feee05e49135\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.611951 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f7a5c45-edc8-443e-9730-f7a2eb5ab116-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-t958r\" (UID: \"5f7a5c45-edc8-443e-9730-f7a2eb5ab116\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-t958r" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.611977 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6e075f32-2803-4a2e-bf1a-0b1858adabf0-bound-sa-token\") pod \"ingress-operator-5b745b69d9-9dq5z\" (UID: \"6e075f32-2803-4a2e-bf1a-0b1858adabf0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.611995 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6x2n\" (UniqueName: \"kubernetes.io/projected/0883a675-24bc-4d9b-b318-feee05e49135-kube-api-access-z6x2n\") pod \"controller-manager-879f6c89f-xgtj9\" (UID: \"0883a675-24bc-4d9b-b318-feee05e49135\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.612012 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wx67\" (UniqueName: \"kubernetes.io/projected/b073719c-394b-496f-9d64-75681184acb0-kube-api-access-6wx67\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.612058 5039 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-service-ca/service-ca-9c57cc56f-6gfhg"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.612161 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/bc7f5002-5906-428a-bb9e-c3507cc151c8-node-pullsecrets\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.612282 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/021285de-f14a-481e-986e-8d07616865a2-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-gj47k\" (UID: \"021285de-f14a-481e-986e-8d07616865a2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gj47k" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.612345 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrznn\" (UniqueName: \"kubernetes.io/projected/1f41fa87-ea96-4457-92a1-8bb69acc8b0e-kube-api-access-nrznn\") pod \"multus-admission-controller-857f4d67dd-zf42k\" (UID: \"1f41fa87-ea96-4457-92a1-8bb69acc8b0e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-zf42k" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.612395 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/bc7f5002-5906-428a-bb9e-c3507cc151c8-audit-dir\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.612426 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/ae5ca663-7edb-49dd-a7a7-668eeace13f7-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-n2dwh\" (UID: \"ae5ca663-7edb-49dd-a7a7-668eeace13f7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-n2dwh" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.612448 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-6gfhg" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.612452 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4rsd\" (UniqueName: \"kubernetes.io/projected/c7032d1d-5aae-4e50-b10f-3df40a0cd983-kube-api-access-d4rsd\") pod \"downloads-7954f5f757-98dp9\" (UID: \"c7032d1d-5aae-4e50-b10f-3df40a0cd983\") " pod="openshift-console/downloads-7954f5f757-98dp9" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.612481 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/bc7f5002-5906-428a-bb9e-c3507cc151c8-node-pullsecrets\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.612639 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0883a675-24bc-4d9b-b318-feee05e49135-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-xgtj9\" (UID: \"0883a675-24bc-4d9b-b318-feee05e49135\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.612789 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.612924 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a9b8d64c-e7f3-4751-865c-c162aab7badd-trusted-ca\") pod \"console-operator-58897d9998-c886j\" (UID: \"a9b8d64c-e7f3-4751-865c-c162aab7badd\") " pod="openshift-console-operator/console-operator-58897d9998-c886j" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.612933 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/bc7f5002-5906-428a-bb9e-c3507cc151c8-audit-dir\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.612963 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n5xgs\" (UniqueName: \"kubernetes.io/projected/95afc2a3-7cfb-4a24-b555-5e8c0d21e044-kube-api-access-n5xgs\") pod \"cluster-image-registry-operator-dc59b4c8b-5dflw\" (UID: \"95afc2a3-7cfb-4a24-b555-5e8c0d21e044\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5dflw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.613091 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlg79\" (UniqueName: \"kubernetes.io/projected/c4c9ff6e-a421-43d0-ac49-c398640d3677-kube-api-access-nlg79\") pod \"authentication-operator-69f744f599-djx5v\" (UID: \"c4c9ff6e-a421-43d0-ac49-c398640d3677\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djx5v" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.613128 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.613149 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.613183 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc7f5002-5906-428a-bb9e-c3507cc151c8-config\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.613207 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.613566 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wpp5\" (UniqueName: \"kubernetes.io/projected/25637e2c-a1e3-4449-a549-7b081d0c4c4c-kube-api-access-4wpp5\") pod \"router-default-5444994796-4hh9n\" (UID: \"25637e2c-a1e3-4449-a549-7b081d0c4c4c\") " pod="openshift-ingress/router-default-5444994796-4hh9n" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.613599 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.613623 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae5ca663-7edb-49dd-a7a7-668eeace13f7-config\") pod \"machine-api-operator-5694c8668f-n2dwh\" (UID: \"ae5ca663-7edb-49dd-a7a7-668eeace13f7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-n2dwh" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.613643 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/bc7f5002-5906-428a-bb9e-c3507cc151c8-encryption-config\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.614001 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-6l58p\" 
(UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.614363 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a9b8d64c-e7f3-4751-865c-c162aab7badd-trusted-ca\") pod \"console-operator-58897d9998-c886j\" (UID: \"a9b8d64c-e7f3-4751-865c-c162aab7badd\") " pod="openshift-console-operator/console-operator-58897d9998-c886j" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.614543 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc7f5002-5906-428a-bb9e-c3507cc151c8-config\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.614589 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-krwk4"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.615209 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-krwk4" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.616432 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnwxs\" (UniqueName: \"kubernetes.io/projected/a9b8d64c-e7f3-4751-865c-c162aab7badd-kube-api-access-qnwxs\") pod \"console-operator-58897d9998-c886j\" (UID: \"a9b8d64c-e7f3-4751-865c-c162aab7badd\") " pod="openshift-console-operator/console-operator-58897d9998-c886j" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.616725 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae5ca663-7edb-49dd-a7a7-668eeace13f7-config\") pod \"machine-api-operator-5694c8668f-n2dwh\" (UID: \"ae5ca663-7edb-49dd-a7a7-668eeace13f7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-n2dwh" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.616909 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/95afc2a3-7cfb-4a24-b555-5e8c0d21e044-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-5dflw\" (UID: \"95afc2a3-7cfb-4a24-b555-5e8c0d21e044\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5dflw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.616965 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/25637e2c-a1e3-4449-a549-7b081d0c4c4c-default-certificate\") pod \"router-default-5444994796-4hh9n\" (UID: \"25637e2c-a1e3-4449-a549-7b081d0c4c4c\") " pod="openshift-ingress/router-default-5444994796-4hh9n" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.617028 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-audit-policies\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.617063 5039 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-audit-policies\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.617088 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-etcd-client\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.617115 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.617138 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glm6q\" (UniqueName: \"kubernetes.io/projected/32c545f6-2f66-4212-a7d0-01eab2f40da7-kube-api-access-glm6q\") pod \"openshift-config-operator-7777fb866f-6g9d2\" (UID: \"32c545f6-2f66-4212-a7d0-01eab2f40da7\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.617165 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v46xk\" (UniqueName: \"kubernetes.io/projected/021285de-f14a-481e-986e-8d07616865a2-kube-api-access-v46xk\") pod \"openshift-apiserver-operator-796bbdcf4f-gj47k\" (UID: \"021285de-f14a-481e-986e-8d07616865a2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gj47k" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.617186 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/bc7f5002-5906-428a-bb9e-c3507cc151c8-audit\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.617206 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e04521cd-f63c-40ba-a296-a34d7ce739d7-auth-proxy-config\") pod \"machine-approver-56656f9798-56bpb\" (UID: \"e04521cd-f63c-40ba-a296-a34d7ce739d7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-56bpb" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.617228 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-serving-cert\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.617251 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-audit-dir\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: 
\"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.617279 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/ae5ca663-7edb-49dd-a7a7-668eeace13f7-images\") pod \"machine-api-operator-5694c8668f-n2dwh\" (UID: \"ae5ca663-7edb-49dd-a7a7-668eeace13f7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-n2dwh" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.617815 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bc7f5002-5906-428a-bb9e-c3507cc151c8-trusted-ca-bundle\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.617856 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b073719c-394b-496f-9d64-75681184acb0-audit-dir\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.617888 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbfxg\" (UniqueName: \"kubernetes.io/projected/ae5ca663-7edb-49dd-a7a7-668eeace13f7-kube-api-access-sbfxg\") pod \"machine-api-operator-5694c8668f-n2dwh\" (UID: \"ae5ca663-7edb-49dd-a7a7-668eeace13f7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-n2dwh" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.617912 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c4c9ff6e-a421-43d0-ac49-c398640d3677-serving-cert\") pod \"authentication-operator-69f744f599-djx5v\" (UID: \"c4c9ff6e-a421-43d0-ac49-c398640d3677\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djx5v" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.617937 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6decbe9-edda-413b-b067-665ccf6efece-config\") pod \"route-controller-manager-6576b87f9c-lv5c7\" (UID: \"a6decbe9-edda-413b-b067-665ccf6efece\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.617960 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.617990 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/bc7f5002-5906-428a-bb9e-c3507cc151c8-etcd-serving-ca\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.618012 5039 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/bc7f5002-5906-428a-bb9e-c3507cc151c8-image-import-ca\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.618039 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/32c545f6-2f66-4212-a7d0-01eab2f40da7-available-featuregates\") pod \"openshift-config-operator-7777fb866f-6g9d2\" (UID: \"32c545f6-2f66-4212-a7d0-01eab2f40da7\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.618062 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a6decbe9-edda-413b-b067-665ccf6efece-client-ca\") pod \"route-controller-manager-6576b87f9c-lv5c7\" (UID: \"a6decbe9-edda-413b-b067-665ccf6efece\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.618138 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.618196 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a9b8d64c-e7f3-4751-865c-c162aab7badd-serving-cert\") pod \"console-operator-58897d9998-c886j\" (UID: \"a9b8d64c-e7f3-4751-865c-c162aab7badd\") " pod="openshift-console-operator/console-operator-58897d9998-c886j" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620060 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/e04521cd-f63c-40ba-a296-a34d7ce739d7-machine-approver-tls\") pod \"machine-approver-56656f9798-56bpb\" (UID: \"e04521cd-f63c-40ba-a296-a34d7ce739d7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-56bpb" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620098 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e04521cd-f63c-40ba-a296-a34d7ce739d7-config\") pod \"machine-approver-56656f9798-56bpb\" (UID: \"e04521cd-f63c-40ba-a296-a34d7ce739d7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-56bpb" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620133 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a6decbe9-edda-413b-b067-665ccf6efece-serving-cert\") pod \"route-controller-manager-6576b87f9c-lv5c7\" (UID: \"a6decbe9-edda-413b-b067-665ccf6efece\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620156 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620181 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9b8d64c-e7f3-4751-865c-c162aab7badd-config\") pod \"console-operator-58897d9998-c886j\" (UID: \"a9b8d64c-e7f3-4751-865c-c162aab7badd\") " pod="openshift-console-operator/console-operator-58897d9998-c886j" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620183 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bc7f5002-5906-428a-bb9e-c3507cc151c8-trusted-ca-bundle\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620199 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/95afc2a3-7cfb-4a24-b555-5e8c0d21e044-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-5dflw\" (UID: \"95afc2a3-7cfb-4a24-b555-5e8c0d21e044\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5dflw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620217 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/021285de-f14a-481e-986e-8d07616865a2-config\") pod \"openshift-apiserver-operator-796bbdcf4f-gj47k\" (UID: \"021285de-f14a-481e-986e-8d07616865a2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gj47k" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620234 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bc7f5002-5906-428a-bb9e-c3507cc151c8-etcd-client\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620253 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f7a5c45-edc8-443e-9730-f7a2eb5ab116-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-t958r\" (UID: \"5f7a5c45-edc8-443e-9730-f7a2eb5ab116\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-t958r" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620291 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dv55\" (UniqueName: \"kubernetes.io/projected/6e075f32-2803-4a2e-bf1a-0b1858adabf0-kube-api-access-6dv55\") pod \"ingress-operator-5b745b69d9-9dq5z\" (UID: \"6e075f32-2803-4a2e-bf1a-0b1858adabf0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620317 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-425dx\" (UniqueName: \"kubernetes.io/projected/f1d33349-6b41-43a0-9aa9-03084435fd75-kube-api-access-425dx\") pod 
\"cluster-samples-operator-665b6dd947-mx8zv\" (UID: \"f1d33349-6b41-43a0-9aa9-03084435fd75\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mx8zv" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620333 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/25637e2c-a1e3-4449-a549-7b081d0c4c4c-stats-auth\") pod \"router-default-5444994796-4hh9n\" (UID: \"25637e2c-a1e3-4449-a549-7b081d0c4c4c\") " pod="openshift-ingress/router-default-5444994796-4hh9n" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620350 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620378 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc7f5002-5906-428a-bb9e-c3507cc151c8-serving-cert\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620395 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/95afc2a3-7cfb-4a24-b555-5e8c0d21e044-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-5dflw\" (UID: \"95afc2a3-7cfb-4a24-b555-5e8c0d21e044\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5dflw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620413 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtjb7\" (UniqueName: \"kubernetes.io/projected/bc7f5002-5906-428a-bb9e-c3507cc151c8-kube-api-access-dtjb7\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620428 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vtfxm\" (UniqueName: \"kubernetes.io/projected/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-kube-api-access-vtfxm\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620446 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/1f41fa87-ea96-4457-92a1-8bb69acc8b0e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-zf42k\" (UID: \"1f41fa87-ea96-4457-92a1-8bb69acc8b0e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-zf42k" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620473 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620490 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620532 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c4c9ff6e-a421-43d0-ac49-c398640d3677-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-djx5v\" (UID: \"c4c9ff6e-a421-43d0-ac49-c398640d3677\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djx5v" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620549 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5f7a5c45-edc8-443e-9730-f7a2eb5ab116-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-t958r\" (UID: \"5f7a5c45-edc8-443e-9730-f7a2eb5ab116\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-t958r" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620576 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0883a675-24bc-4d9b-b318-feee05e49135-serving-cert\") pod \"controller-manager-879f6c89f-xgtj9\" (UID: \"0883a675-24bc-4d9b-b318-feee05e49135\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620595 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/32c545f6-2f66-4212-a7d0-01eab2f40da7-serving-cert\") pod \"openshift-config-operator-7777fb866f-6g9d2\" (UID: \"32c545f6-2f66-4212-a7d0-01eab2f40da7\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620616 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4gph\" (UniqueName: \"kubernetes.io/projected/e04521cd-f63c-40ba-a296-a34d7ce739d7-kube-api-access-v4gph\") pod \"machine-approver-56656f9798-56bpb\" (UID: \"e04521cd-f63c-40ba-a296-a34d7ce739d7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-56bpb" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620633 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c4c9ff6e-a421-43d0-ac49-c398640d3677-service-ca-bundle\") pod \"authentication-operator-69f744f599-djx5v\" (UID: \"c4c9ff6e-a421-43d0-ac49-c398640d3677\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djx5v" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620663 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4c9ff6e-a421-43d0-ac49-c398640d3677-config\") pod \"authentication-operator-69f744f599-djx5v\" (UID: \"c4c9ff6e-a421-43d0-ac49-c398640d3677\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-djx5v" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620680 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f1d33349-6b41-43a0-9aa9-03084435fd75-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-mx8zv\" (UID: \"f1d33349-6b41-43a0-9aa9-03084435fd75\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mx8zv" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620698 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-encryption-config\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.620717 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2flbf\" (UniqueName: \"kubernetes.io/projected/a6decbe9-edda-413b-b067-665ccf6efece-kube-api-access-2flbf\") pod \"route-controller-manager-6576b87f9c-lv5c7\" (UID: \"a6decbe9-edda-413b-b067-665ccf6efece\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.618355 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-audit-policies\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.621123 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/bc7f5002-5906-428a-bb9e-c3507cc151c8-etcd-serving-ca\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.622233 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e04521cd-f63c-40ba-a296-a34d7ce739d7-auth-proxy-config\") pod \"machine-approver-56656f9798-56bpb\" (UID: \"e04521cd-f63c-40ba-a296-a34d7ce739d7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-56bpb" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.623916 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/95afc2a3-7cfb-4a24-b555-5e8c0d21e044-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-5dflw\" (UID: \"95afc2a3-7cfb-4a24-b555-5e8c0d21e044\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5dflw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.618373 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-audit-dir\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.624311 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/e04521cd-f63c-40ba-a296-a34d7ce739d7-config\") pod \"machine-approver-56656f9798-56bpb\" (UID: \"e04521cd-f63c-40ba-a296-a34d7ce739d7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-56bpb" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.619421 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/ae5ca663-7edb-49dd-a7a7-668eeace13f7-images\") pod \"machine-api-operator-5694c8668f-n2dwh\" (UID: \"ae5ca663-7edb-49dd-a7a7-668eeace13f7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-n2dwh" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.624595 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/bc7f5002-5906-428a-bb9e-c3507cc151c8-image-import-ca\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.625340 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c4c9ff6e-a421-43d0-ac49-c398640d3677-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-djx5v\" (UID: \"c4c9ff6e-a421-43d0-ac49-c398640d3677\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djx5v" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.619561 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6decbe9-edda-413b-b067-665ccf6efece-config\") pod \"route-controller-manager-6576b87f9c-lv5c7\" (UID: \"a6decbe9-edda-413b-b067-665ccf6efece\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.625974 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/021285de-f14a-481e-986e-8d07616865a2-config\") pod \"openshift-apiserver-operator-796bbdcf4f-gj47k\" (UID: \"021285de-f14a-481e-986e-8d07616865a2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gj47k" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.626414 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a6decbe9-edda-413b-b067-665ccf6efece-client-ca\") pod \"route-controller-manager-6576b87f9c-lv5c7\" (UID: \"a6decbe9-edda-413b-b067-665ccf6efece\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.627408 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/ae5ca663-7edb-49dd-a7a7-668eeace13f7-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-n2dwh\" (UID: \"ae5ca663-7edb-49dd-a7a7-668eeace13f7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-n2dwh" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.627808 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-etcd-client\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 
13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.627905 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/bc7f5002-5906-428a-bb9e-c3507cc151c8-audit\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.628917 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a6decbe9-edda-413b-b067-665ccf6efece-serving-cert\") pod \"route-controller-manager-6576b87f9c-lv5c7\" (UID: \"a6decbe9-edda-413b-b067-665ccf6efece\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.629137 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.631337 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c4c9ff6e-a421-43d0-ac49-c398640d3677-service-ca-bundle\") pod \"authentication-operator-69f744f599-djx5v\" (UID: \"c4c9ff6e-a421-43d0-ac49-c398640d3677\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djx5v" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.631889 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4c9ff6e-a421-43d0-ac49-c398640d3677-config\") pod \"authentication-operator-69f744f599-djx5v\" (UID: \"c4c9ff6e-a421-43d0-ac49-c398640d3677\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djx5v" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.631922 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4qp6m"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.632543 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.632881 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.633285 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.633577 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4qp6m" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.633717 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.634547 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/e04521cd-f63c-40ba-a296-a34d7ce739d7-machine-approver-tls\") pod \"machine-approver-56656f9798-56bpb\" (UID: \"e04521cd-f63c-40ba-a296-a34d7ce739d7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-56bpb" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.636562 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-encryption-config\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.636890 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/021285de-f14a-481e-986e-8d07616865a2-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-gj47k\" (UID: \"021285de-f14a-481e-986e-8d07616865a2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gj47k" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.637181 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/95afc2a3-7cfb-4a24-b555-5e8c0d21e044-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-5dflw\" (UID: \"95afc2a3-7cfb-4a24-b555-5e8c0d21e044\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5dflw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.637415 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c4c9ff6e-a421-43d0-ac49-c398640d3677-serving-cert\") pod \"authentication-operator-69f744f599-djx5v\" (UID: \"c4c9ff6e-a421-43d0-ac49-c398640d3677\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djx5v" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.652222 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.652817 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9b8d64c-e7f3-4751-865c-c162aab7badd-config\") pod \"console-operator-58897d9998-c886j\" (UID: \"a9b8d64c-e7f3-4751-865c-c162aab7badd\") " pod="openshift-console-operator/console-operator-58897d9998-c886j" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.652856 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc7f5002-5906-428a-bb9e-c3507cc151c8-serving-cert\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.653558 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/f1d33349-6b41-43a0-9aa9-03084435fd75-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-mx8zv\" (UID: \"f1d33349-6b41-43a0-9aa9-03084435fd75\") " 
pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mx8zv" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.654007 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-serving-cert\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.654533 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/bc7f5002-5906-428a-bb9e-c3507cc151c8-encryption-config\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.654557 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a9b8d64c-e7f3-4751-865c-c162aab7badd-serving-cert\") pod \"console-operator-58897d9998-c886j\" (UID: \"a9b8d64c-e7f3-4751-865c-c162aab7badd\") " pod="openshift-console-operator/console-operator-58897d9998-c886j" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.654607 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ndmnz"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.658878 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.658913 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jbfln"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.659107 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ndmnz" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.661089 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.662207 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.670889 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bc7f5002-5906-428a-bb9e-c3507cc151c8-etcd-client\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.698156 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.699781 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-zqgfl"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.699888 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-k4227"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.702973 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.721683 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-t958r"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.721994 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722037 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f7a5c45-edc8-443e-9730-f7a2eb5ab116-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-t958r\" (UID: \"5f7a5c45-edc8-443e-9730-f7a2eb5ab116\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-t958r" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722082 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dv55\" (UniqueName: \"kubernetes.io/projected/6e075f32-2803-4a2e-bf1a-0b1858adabf0-kube-api-access-6dv55\") pod \"ingress-operator-5b745b69d9-9dq5z\" (UID: \"6e075f32-2803-4a2e-bf1a-0b1858adabf0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722121 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" 
Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722165 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/25637e2c-a1e3-4449-a549-7b081d0c4c4c-stats-auth\") pod \"router-default-5444994796-4hh9n\" (UID: \"25637e2c-a1e3-4449-a549-7b081d0c4c4c\") " pod="openshift-ingress/router-default-5444994796-4hh9n" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722218 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722250 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722271 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/1f41fa87-ea96-4457-92a1-8bb69acc8b0e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-zf42k\" (UID: \"1f41fa87-ea96-4457-92a1-8bb69acc8b0e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-zf42k" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722296 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5f7a5c45-edc8-443e-9730-f7a2eb5ab116-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-t958r\" (UID: \"5f7a5c45-edc8-443e-9730-f7a2eb5ab116\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-t958r" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722330 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0883a675-24bc-4d9b-b318-feee05e49135-serving-cert\") pod \"controller-manager-879f6c89f-xgtj9\" (UID: \"0883a675-24bc-4d9b-b318-feee05e49135\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722353 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/32c545f6-2f66-4212-a7d0-01eab2f40da7-serving-cert\") pod \"openshift-config-operator-7777fb866f-6g9d2\" (UID: \"32c545f6-2f66-4212-a7d0-01eab2f40da7\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722401 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6e075f32-2803-4a2e-bf1a-0b1858adabf0-metrics-tls\") pod \"ingress-operator-5b745b69d9-9dq5z\" (UID: \"6e075f32-2803-4a2e-bf1a-0b1858adabf0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722423 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"client-ca\" (UniqueName: \"kubernetes.io/configmap/0883a675-24bc-4d9b-b318-feee05e49135-client-ca\") pod \"controller-manager-879f6c89f-xgtj9\" (UID: \"0883a675-24bc-4d9b-b318-feee05e49135\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722447 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/25637e2c-a1e3-4449-a549-7b081d0c4c4c-metrics-certs\") pod \"router-default-5444994796-4hh9n\" (UID: \"25637e2c-a1e3-4449-a549-7b081d0c4c4c\") " pod="openshift-ingress/router-default-5444994796-4hh9n" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722471 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722497 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/25637e2c-a1e3-4449-a549-7b081d0c4c4c-service-ca-bundle\") pod \"router-default-5444994796-4hh9n\" (UID: \"25637e2c-a1e3-4449-a549-7b081d0c4c4c\") " pod="openshift-ingress/router-default-5444994796-4hh9n" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722540 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6e075f32-2803-4a2e-bf1a-0b1858adabf0-trusted-ca\") pod \"ingress-operator-5b745b69d9-9dq5z\" (UID: \"6e075f32-2803-4a2e-bf1a-0b1858adabf0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722569 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0883a675-24bc-4d9b-b318-feee05e49135-config\") pod \"controller-manager-879f6c89f-xgtj9\" (UID: \"0883a675-24bc-4d9b-b318-feee05e49135\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722591 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f7a5c45-edc8-443e-9730-f7a2eb5ab116-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-t958r\" (UID: \"5f7a5c45-edc8-443e-9730-f7a2eb5ab116\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-t958r" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722616 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6e075f32-2803-4a2e-bf1a-0b1858adabf0-bound-sa-token\") pod \"ingress-operator-5b745b69d9-9dq5z\" (UID: \"6e075f32-2803-4a2e-bf1a-0b1858adabf0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722637 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6x2n\" (UniqueName: \"kubernetes.io/projected/0883a675-24bc-4d9b-b318-feee05e49135-kube-api-access-z6x2n\") pod \"controller-manager-879f6c89f-xgtj9\" (UID: 
\"0883a675-24bc-4d9b-b318-feee05e49135\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722658 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wx67\" (UniqueName: \"kubernetes.io/projected/b073719c-394b-496f-9d64-75681184acb0-kube-api-access-6wx67\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722689 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrznn\" (UniqueName: \"kubernetes.io/projected/1f41fa87-ea96-4457-92a1-8bb69acc8b0e-kube-api-access-nrznn\") pod \"multus-admission-controller-857f4d67dd-zf42k\" (UID: \"1f41fa87-ea96-4457-92a1-8bb69acc8b0e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-zf42k" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722712 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0883a675-24bc-4d9b-b318-feee05e49135-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-xgtj9\" (UID: \"0883a675-24bc-4d9b-b318-feee05e49135\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722792 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722816 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722841 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wpp5\" (UniqueName: \"kubernetes.io/projected/25637e2c-a1e3-4449-a549-7b081d0c4c4c-kube-api-access-4wpp5\") pod \"router-default-5444994796-4hh9n\" (UID: \"25637e2c-a1e3-4449-a549-7b081d0c4c4c\") " pod="openshift-ingress/router-default-5444994796-4hh9n" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722863 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722893 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/25637e2c-a1e3-4449-a549-7b081d0c4c4c-default-certificate\") pod \"router-default-5444994796-4hh9n\" (UID: \"25637e2c-a1e3-4449-a549-7b081d0c4c4c\") " 
pod="openshift-ingress/router-default-5444994796-4hh9n" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722918 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-audit-policies\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722932 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722945 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.722969 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glm6q\" (UniqueName: \"kubernetes.io/projected/32c545f6-2f66-4212-a7d0-01eab2f40da7-kube-api-access-glm6q\") pod \"openshift-config-operator-7777fb866f-6g9d2\" (UID: \"32c545f6-2f66-4212-a7d0-01eab2f40da7\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.723001 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b073719c-394b-496f-9d64-75681184acb0-audit-dir\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.723033 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.723058 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/32c545f6-2f66-4212-a7d0-01eab2f40da7-available-featuregates\") pod \"openshift-config-operator-7777fb866f-6g9d2\" (UID: \"32c545f6-2f66-4212-a7d0-01eab2f40da7\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.723086 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc 
kubenswrapper[5039]: I1124 13:20:28.723259 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.724803 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0883a675-24bc-4d9b-b318-feee05e49135-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-xgtj9\" (UID: \"0883a675-24bc-4d9b-b318-feee05e49135\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.725411 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.725461 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-b8b2f"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.725680 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0883a675-24bc-4d9b-b318-feee05e49135-config\") pod \"controller-manager-879f6c89f-xgtj9\" (UID: \"0883a675-24bc-4d9b-b318-feee05e49135\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.725771 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gkvs9"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.726001 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.726101 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0883a675-24bc-4d9b-b318-feee05e49135-serving-cert\") pod \"controller-manager-879f6c89f-xgtj9\" (UID: \"0883a675-24bc-4d9b-b318-feee05e49135\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.726122 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-audit-policies\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.726569 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/b073719c-394b-496f-9d64-75681184acb0-audit-dir\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.726868 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/32c545f6-2f66-4212-a7d0-01eab2f40da7-available-featuregates\") pod \"openshift-config-operator-7777fb866f-6g9d2\" (UID: \"32c545f6-2f66-4212-a7d0-01eab2f40da7\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.727342 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-29j84"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.728344 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-29j84" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.729004 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-kln9b"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.729395 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.729642 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.729789 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6e075f32-2803-4a2e-bf1a-0b1858adabf0-metrics-tls\") pod \"ingress-operator-5b745b69d9-9dq5z\" (UID: \"6e075f32-2803-4a2e-bf1a-0b1858adabf0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.730845 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0883a675-24bc-4d9b-b318-feee05e49135-client-ca\") pod \"controller-manager-879f6c89f-xgtj9\" (UID: \"0883a675-24bc-4d9b-b318-feee05e49135\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.730973 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-zf42k"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.731407 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 
13:20:28.731770 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.732604 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-6kn6v"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.733732 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-k884h"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.733851 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/32c545f6-2f66-4212-a7d0-01eab2f40da7-serving-cert\") pod \"openshift-config-operator-7777fb866f-6g9d2\" (UID: \"32c545f6-2f66-4212-a7d0-01eab2f40da7\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.734908 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.735328 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.735617 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.736433 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-djx5v"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.736814 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.737835 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.738659 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.739158 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ndmnz"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.739301 5039 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.743689 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-6gfhg"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.743719 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.744614 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-md284"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.745940 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4qp6m"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.746874 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6e075f32-2803-4a2e-bf1a-0b1858adabf0-trusted-ca\") pod \"ingress-operator-5b745b69d9-9dq5z\" (UID: \"6e075f32-2803-4a2e-bf1a-0b1858adabf0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.747281 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8t5gh"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.748726 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-29j84"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.750129 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.752197 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-fzl9j"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.753345 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-fzl9j" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.753675 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-pcqbh"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.755843 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-fzl9j"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.755953 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.756364 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-pcqbh"] Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.759437 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.770694 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/25637e2c-a1e3-4449-a549-7b081d0c4c4c-metrics-certs\") pod \"router-default-5444994796-4hh9n\" (UID: \"25637e2c-a1e3-4449-a549-7b081d0c4c4c\") " pod="openshift-ingress/router-default-5444994796-4hh9n" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.778836 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.800123 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.809856 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/25637e2c-a1e3-4449-a549-7b081d0c4c4c-default-certificate\") pod \"router-default-5444994796-4hh9n\" (UID: \"25637e2c-a1e3-4449-a549-7b081d0c4c4c\") " pod="openshift-ingress/router-default-5444994796-4hh9n" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.819521 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.827119 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/25637e2c-a1e3-4449-a549-7b081d0c4c4c-stats-auth\") pod \"router-default-5444994796-4hh9n\" (UID: \"25637e2c-a1e3-4449-a549-7b081d0c4c4c\") " pod="openshift-ingress/router-default-5444994796-4hh9n" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.839397 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.846869 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/25637e2c-a1e3-4449-a549-7b081d0c4c4c-service-ca-bundle\") pod \"router-default-5444994796-4hh9n\" (UID: \"25637e2c-a1e3-4449-a549-7b081d0c4c4c\") " pod="openshift-ingress/router-default-5444994796-4hh9n" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.859843 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.880189 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.899901 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.920159 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.939856 5039 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.958671 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.966323 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f7a5c45-edc8-443e-9730-f7a2eb5ab116-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-t958r\" (UID: \"5f7a5c45-edc8-443e-9730-f7a2eb5ab116\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-t958r" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.980659 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 24 13:20:28 crc kubenswrapper[5039]: I1124 13:20:28.999670 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.003167 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f7a5c45-edc8-443e-9730-f7a2eb5ab116-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-t958r\" (UID: \"5f7a5c45-edc8-443e-9730-f7a2eb5ab116\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-t958r" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.019273 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.040691 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.058963 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.080339 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.100412 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.120019 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.139771 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.161816 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.166080 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/1f41fa87-ea96-4457-92a1-8bb69acc8b0e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-zf42k\" (UID: \"1f41fa87-ea96-4457-92a1-8bb69acc8b0e\") " 
pod="openshift-multus/multus-admission-controller-857f4d67dd-zf42k" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.179129 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.241202 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.260750 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.280711 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.300482 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.320030 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.339792 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.360022 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.380207 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.400305 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.419100 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.439239 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.459310 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.480019 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.499829 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.518799 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.539972 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.559405 5039 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.579717 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.597850 5039 request.go:700] Waited for 1.000812283s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/secrets?fieldSelector=metadata.name%3Dmarketplace-operator-dockercfg-5nsgg&limit=500&resourceVersion=0 Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.599834 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.620245 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.653443 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.662006 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.678955 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.698996 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.719310 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.740224 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.759750 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.778461 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.799245 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.820024 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.838955 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.859644 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.880595 5039 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.900192 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.919607 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.939865 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.959218 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.980771 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 24 13:20:29 crc kubenswrapper[5039]: I1124 13:20:29.999712 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.020100 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.040175 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.078094 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4rsd\" (UniqueName: \"kubernetes.io/projected/c7032d1d-5aae-4e50-b10f-3df40a0cd983-kube-api-access-d4rsd\") pod \"downloads-7954f5f757-98dp9\" (UID: \"c7032d1d-5aae-4e50-b10f-3df40a0cd983\") " pod="openshift-console/downloads-7954f5f757-98dp9" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.080112 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.114985 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n5xgs\" (UniqueName: \"kubernetes.io/projected/95afc2a3-7cfb-4a24-b555-5e8c0d21e044-kube-api-access-n5xgs\") pod \"cluster-image-registry-operator-dc59b4c8b-5dflw\" (UID: \"95afc2a3-7cfb-4a24-b555-5e8c0d21e044\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5dflw" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.118093 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-98dp9" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.135103 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlg79\" (UniqueName: \"kubernetes.io/projected/c4c9ff6e-a421-43d0-ac49-c398640d3677-kube-api-access-nlg79\") pod \"authentication-operator-69f744f599-djx5v\" (UID: \"c4c9ff6e-a421-43d0-ac49-c398640d3677\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djx5v" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.138761 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.160004 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.180118 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.199464 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.223224 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-djx5v" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.246989 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnwxs\" (UniqueName: \"kubernetes.io/projected/a9b8d64c-e7f3-4751-865c-c162aab7badd-kube-api-access-qnwxs\") pod \"console-operator-58897d9998-c886j\" (UID: \"a9b8d64c-e7f3-4751-865c-c162aab7badd\") " pod="openshift-console-operator/console-operator-58897d9998-c886j" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.262720 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v46xk\" (UniqueName: \"kubernetes.io/projected/021285de-f14a-481e-986e-8d07616865a2-kube-api-access-v46xk\") pod \"openshift-apiserver-operator-796bbdcf4f-gj47k\" (UID: \"021285de-f14a-481e-986e-8d07616865a2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gj47k" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.275205 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbfxg\" (UniqueName: \"kubernetes.io/projected/ae5ca663-7edb-49dd-a7a7-668eeace13f7-kube-api-access-sbfxg\") pod \"machine-api-operator-5694c8668f-n2dwh\" (UID: \"ae5ca663-7edb-49dd-a7a7-668eeace13f7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-n2dwh" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.293528 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2flbf\" (UniqueName: \"kubernetes.io/projected/a6decbe9-edda-413b-b067-665ccf6efece-kube-api-access-2flbf\") pod \"route-controller-manager-6576b87f9c-lv5c7\" (UID: \"a6decbe9-edda-413b-b067-665ccf6efece\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.313803 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gj47k" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.314136 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.324455 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/95afc2a3-7cfb-4a24-b555-5e8c0d21e044-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-5dflw\" (UID: \"95afc2a3-7cfb-4a24-b555-5e8c0d21e044\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5dflw" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.335110 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtjb7\" (UniqueName: \"kubernetes.io/projected/bc7f5002-5906-428a-bb9e-c3507cc151c8-kube-api-access-dtjb7\") pod \"apiserver-76f77b778f-9jvgw\" (UID: \"bc7f5002-5906-428a-bb9e-c3507cc151c8\") " pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.353837 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vtfxm\" (UniqueName: \"kubernetes.io/projected/cf6c1c3b-3f1c-449f-966b-d8617a4ca73d-kube-api-access-vtfxm\") pod \"apiserver-7bbb656c7d-6l58p\" (UID: \"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.357911 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-n2dwh" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.368641 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-98dp9"] Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.377822 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-425dx\" (UniqueName: \"kubernetes.io/projected/f1d33349-6b41-43a0-9aa9-03084435fd75-kube-api-access-425dx\") pod \"cluster-samples-operator-665b6dd947-mx8zv\" (UID: \"f1d33349-6b41-43a0-9aa9-03084435fd75\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mx8zv" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.394207 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.395075 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4gph\" (UniqueName: \"kubernetes.io/projected/e04521cd-f63c-40ba-a296-a34d7ce739d7-kube-api-access-v4gph\") pod \"machine-approver-56656f9798-56bpb\" (UID: \"e04521cd-f63c-40ba-a296-a34d7ce739d7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-56bpb" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.398945 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-djx5v"] Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.400777 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.401075 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-c886j" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.411303 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5dflw" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.420104 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.443415 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.459784 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.483000 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.499874 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.535111 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-56bpb" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.535580 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dv55\" (UniqueName: \"kubernetes.io/projected/6e075f32-2803-4a2e-bf1a-0b1858adabf0-kube-api-access-6dv55\") pod \"ingress-operator-5b745b69d9-9dq5z\" (UID: \"6e075f32-2803-4a2e-bf1a-0b1858adabf0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.562788 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6x2n\" (UniqueName: \"kubernetes.io/projected/0883a675-24bc-4d9b-b318-feee05e49135-kube-api-access-z6x2n\") pod \"controller-manager-879f6c89f-xgtj9\" (UID: \"0883a675-24bc-4d9b-b318-feee05e49135\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.575223 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6e075f32-2803-4a2e-bf1a-0b1858adabf0-bound-sa-token\") pod \"ingress-operator-5b745b69d9-9dq5z\" (UID: \"6e075f32-2803-4a2e-bf1a-0b1858adabf0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.580928 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mx8zv" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.594071 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5f7a5c45-edc8-443e-9730-f7a2eb5ab116-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-t958r\" (UID: \"5f7a5c45-edc8-443e-9730-f7a2eb5ab116\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-t958r" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.595808 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.595978 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gj47k"] Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.609654 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-n2dwh"] Nov 24 13:20:30 crc kubenswrapper[5039]: W1124 13:20:30.610116 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode04521cd_f63c_40ba_a296_a34d7ce739d7.slice/crio-c8d718d375842690163002f4fccae8ca05b2e2da57688ada908ed0171d19dfcc WatchSource:0}: Error finding container c8d718d375842690163002f4fccae8ca05b2e2da57688ada908ed0171d19dfcc: Status 404 returned error can't find the container with id c8d718d375842690163002f4fccae8ca05b2e2da57688ada908ed0171d19dfcc Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.612564 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wx67\" (UniqueName: \"kubernetes.io/projected/b073719c-394b-496f-9d64-75681184acb0-kube-api-access-6wx67\") pod \"oauth-openshift-558db77b4-b8b2f\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.615699 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-t958r" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.619950 5039 request.go:700] Waited for 1.894818129s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-multus/serviceaccounts/multus-ac/token Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.625608 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.636182 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrznn\" (UniqueName: \"kubernetes.io/projected/1f41fa87-ea96-4457-92a1-8bb69acc8b0e-kube-api-access-nrznn\") pod \"multus-admission-controller-857f4d67dd-zf42k\" (UID: \"1f41fa87-ea96-4457-92a1-8bb69acc8b0e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-zf42k" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.675817 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wpp5\" (UniqueName: \"kubernetes.io/projected/25637e2c-a1e3-4449-a549-7b081d0c4c4c-kube-api-access-4wpp5\") pod \"router-default-5444994796-4hh9n\" (UID: \"25637e2c-a1e3-4449-a549-7b081d0c4c4c\") " pod="openshift-ingress/router-default-5444994796-4hh9n" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.680260 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.700606 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.700925 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-c886j"] Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.701464 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glm6q\" (UniqueName: \"kubernetes.io/projected/32c545f6-2f66-4212-a7d0-01eab2f40da7-kube-api-access-glm6q\") pod \"openshift-config-operator-7777fb866f-6g9d2\" (UID: \"32c545f6-2f66-4212-a7d0-01eab2f40da7\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.705466 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7"] Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.722625 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.733184 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-9jvgw"] Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.740132 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.760039 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.782638 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.800356 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.803080 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2" Nov 24 13:20:30 crc kubenswrapper[5039]: W1124 13:20:30.811779 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbc7f5002_5906_428a_bb9e_c3507cc151c8.slice/crio-7c217b44f2d6afd8024f2384b06f451dc91abecceac23af3940c19ea42755603 WatchSource:0}: Error finding container 7c217b44f2d6afd8024f2384b06f451dc91abecceac23af3940c19ea42755603: Status 404 returned error can't find the container with id 7c217b44f2d6afd8024f2384b06f451dc91abecceac23af3940c19ea42755603 Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.815011 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.819864 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.840169 5039 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.848702 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.860865 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.868973 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mx8zv"] Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.895913 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5dflw"] Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.902431 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-4hh9n" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.909242 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z"] Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.935847 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-zf42k" Nov 24 13:20:30 crc kubenswrapper[5039]: W1124 13:20:30.938911 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod95afc2a3_7cfb_4a24_b555_5e8c0d21e044.slice/crio-aa73827c6c30a5e9677ea210abfacc6dd02e01c9812b2ff4a49b5930618fa28f WatchSource:0}: Error finding container aa73827c6c30a5e9677ea210abfacc6dd02e01c9812b2ff4a49b5930618fa28f: Status 404 returned error can't find the container with id aa73827c6c30a5e9677ea210abfacc6dd02e01c9812b2ff4a49b5930618fa28f Nov 24 13:20:30 crc kubenswrapper[5039]: W1124 13:20:30.944206 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6e075f32_2803_4a2e_bf1a_0b1858adabf0.slice/crio-dc68d25e212f7dad86ff42d3b4419d8ea67ed64fb5afe236989113d126e7c14c WatchSource:0}: Error finding container dc68d25e212f7dad86ff42d3b4419d8ea67ed64fb5afe236989113d126e7c14c: Status 404 returned error can't find the container with id dc68d25e212f7dad86ff42d3b4419d8ea67ed64fb5afe236989113d126e7c14c Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.961409 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/750f36ae-2e78-4a6d-8e78-e315d507d436-registry-certificates\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.961449 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-console-serving-cert\") pod \"console-f9d7485db-zqgfl\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.961472 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-trusted-ca-bundle\") pod \"console-f9d7485db-zqgfl\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.961495 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/750f36ae-2e78-4a6d-8e78-e315d507d436-ca-trust-extracted\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.961593 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.961623 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: 
\"kubernetes.io/secret/750f36ae-2e78-4a6d-8e78-e315d507d436-installation-pull-secrets\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.961653 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20f8a1d6-8417-4cd3-9af7-b8e8f05f4c52-config\") pod \"kube-apiserver-operator-766d6c64bb-jbfln\" (UID: \"20f8a1d6-8417-4cd3-9af7-b8e8f05f4c52\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jbfln" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.961702 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nssgn\" (UniqueName: \"kubernetes.io/projected/f4787aef-1d33-49b7-b5c3-7b0404bab7f5-kube-api-access-nssgn\") pod \"openshift-controller-manager-operator-756b6f6bc6-blxxm\" (UID: \"f4787aef-1d33-49b7-b5c3-7b0404bab7f5\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-blxxm" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.961736 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6hg5\" (UniqueName: \"kubernetes.io/projected/750f36ae-2e78-4a6d-8e78-e315d507d436-kube-api-access-x6hg5\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.961786 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7tbf\" (UniqueName: \"kubernetes.io/projected/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-kube-api-access-v7tbf\") pod \"console-f9d7485db-zqgfl\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.961823 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f4787aef-1d33-49b7-b5c3-7b0404bab7f5-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-blxxm\" (UID: \"f4787aef-1d33-49b7-b5c3-7b0404bab7f5\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-blxxm" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.962191 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-console-config\") pod \"console-f9d7485db-zqgfl\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.962240 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/2c2dcfde-39a1-42d0-ba00-d18199782ba5-images\") pod \"machine-config-operator-74547568cd-x9lf7\" (UID: \"2c2dcfde-39a1-42d0-ba00-d18199782ba5\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.962260 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-l8mqr\" (UniqueName: \"kubernetes.io/projected/2c2dcfde-39a1-42d0-ba00-d18199782ba5-kube-api-access-l8mqr\") pod \"machine-config-operator-74547568cd-x9lf7\" (UID: \"2c2dcfde-39a1-42d0-ba00-d18199782ba5\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.962281 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/20f8a1d6-8417-4cd3-9af7-b8e8f05f4c52-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-jbfln\" (UID: \"20f8a1d6-8417-4cd3-9af7-b8e8f05f4c52\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jbfln" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.962321 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/750f36ae-2e78-4a6d-8e78-e315d507d436-registry-tls\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.962344 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/20f8a1d6-8417-4cd3-9af7-b8e8f05f4c52-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-jbfln\" (UID: \"20f8a1d6-8417-4cd3-9af7-b8e8f05f4c52\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jbfln" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.962377 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/750f36ae-2e78-4a6d-8e78-e315d507d436-bound-sa-token\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.962412 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-console-oauth-config\") pod \"console-f9d7485db-zqgfl\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.962438 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4787aef-1d33-49b7-b5c3-7b0404bab7f5-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-blxxm\" (UID: \"f4787aef-1d33-49b7-b5c3-7b0404bab7f5\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-blxxm" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.962472 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/750f36ae-2e78-4a6d-8e78-e315d507d436-trusted-ca\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.962551 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" 
(UniqueName: \"kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-service-ca\") pod \"console-f9d7485db-zqgfl\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.962583 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2c2dcfde-39a1-42d0-ba00-d18199782ba5-auth-proxy-config\") pod \"machine-config-operator-74547568cd-x9lf7\" (UID: \"2c2dcfde-39a1-42d0-ba00-d18199782ba5\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.962613 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/792734d4-55a5-4a6f-be71-52a83b22c73f-metrics-tls\") pod \"dns-operator-744455d44c-k4227\" (UID: \"792734d4-55a5-4a6f-be71-52a83b22c73f\") " pod="openshift-dns-operator/dns-operator-744455d44c-k4227" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.962639 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-oauth-serving-cert\") pod \"console-f9d7485db-zqgfl\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.962661 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdksb\" (UniqueName: \"kubernetes.io/projected/792734d4-55a5-4a6f-be71-52a83b22c73f-kube-api-access-gdksb\") pod \"dns-operator-744455d44c-k4227\" (UID: \"792734d4-55a5-4a6f-be71-52a83b22c73f\") " pod="openshift-dns-operator/dns-operator-744455d44c-k4227" Nov 24 13:20:30 crc kubenswrapper[5039]: I1124 13:20:30.962687 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/2c2dcfde-39a1-42d0-ba00-d18199782ba5-proxy-tls\") pod \"machine-config-operator-74547568cd-x9lf7\" (UID: \"2c2dcfde-39a1-42d0-ba00-d18199782ba5\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7" Nov 24 13:20:30 crc kubenswrapper[5039]: E1124 13:20:30.963899 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:31.463883926 +0000 UTC m=+143.903008426 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:31 crc kubenswrapper[5039]: W1124 13:20:31.002361 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod25637e2c_a1e3_4449_a549_7b081d0c4c4c.slice/crio-05fb2f77eeb0d345a4d9dfc4d7cd159a41dc3c486a20ed596310650f3e905e31 WatchSource:0}: Error finding container 05fb2f77eeb0d345a4d9dfc4d7cd159a41dc3c486a20ed596310650f3e905e31: Status 404 returned error can't find the container with id 05fb2f77eeb0d345a4d9dfc4d7cd159a41dc3c486a20ed596310650f3e905e31 Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.030257 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-djx5v" event={"ID":"c4c9ff6e-a421-43d0-ac49-c398640d3677","Type":"ContainerStarted","Data":"be2e6ea8473b87c7e6d102b75dab7d82293b974f4c2e19ff2630aaa8fbed03bb"} Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.030301 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-djx5v" event={"ID":"c4c9ff6e-a421-43d0-ac49-c398640d3677","Type":"ContainerStarted","Data":"fe35113a1223e15f93a1fa4628c9cc719307b7baeec398530b537e9910a1900b"} Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.040996 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" event={"ID":"bc7f5002-5906-428a-bb9e-c3507cc151c8","Type":"ContainerStarted","Data":"7c217b44f2d6afd8024f2384b06f451dc91abecceac23af3940c19ea42755603"} Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.048389 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gj47k" event={"ID":"021285de-f14a-481e-986e-8d07616865a2","Type":"ContainerStarted","Data":"a62852a222d2e3fa9a648870740f0e86d67e3e99ac872f45f1957b68046989ad"} Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.065350 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.065847 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdksb\" (UniqueName: \"kubernetes.io/projected/792734d4-55a5-4a6f-be71-52a83b22c73f-kube-api-access-gdksb\") pod \"dns-operator-744455d44c-k4227\" (UID: \"792734d4-55a5-4a6f-be71-52a83b22c73f\") " pod="openshift-dns-operator/dns-operator-744455d44c-k4227" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.065925 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s48b6\" (UniqueName: \"kubernetes.io/projected/5d53487c-6d56-443c-94d3-899cd8be9666-kube-api-access-s48b6\") pod \"catalog-operator-68c6474976-bmdnh\" (UID: 
\"5d53487c-6d56-443c-94d3-899cd8be9666\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066051 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-trusted-ca-bundle\") pod \"console-f9d7485db-zqgfl\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066083 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/21cbf909-e064-465c-bb64-5b0d5c82d691-registration-dir\") pod \"csi-hostpathplugin-pcqbh\" (UID: \"21cbf909-e064-465c-bb64-5b0d5c82d691\") " pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066135 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spd47\" (UniqueName: \"kubernetes.io/projected/b74506b2-3cd6-4803-9a11-69fa714e570e-kube-api-access-spd47\") pod \"service-ca-9c57cc56f-6gfhg\" (UID: \"b74506b2-3cd6-4803-9a11-69fa714e570e\") " pod="openshift-service-ca/service-ca-9c57cc56f-6gfhg" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066184 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/1988c73c-a04d-4b50-af92-54dfc2a4a262-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4qp6m\" (UID: \"1988c73c-a04d-4b50-af92-54dfc2a4a262\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4qp6m" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066228 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/75c9640f-f412-4f63-ac86-f4d665d57038-certs\") pod \"machine-config-server-krwk4\" (UID: \"75c9640f-f412-4f63-ac86-f4d665d57038\") " pod="openshift-machine-config-operator/machine-config-server-krwk4" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066258 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/750f36ae-2e78-4a6d-8e78-e315d507d436-ca-trust-extracted\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066283 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjtwg\" (UniqueName: \"kubernetes.io/projected/2d22e4bd-1186-43a6-bc4f-d313377c78f3-kube-api-access-hjtwg\") pod \"package-server-manager-789f6589d5-ndmnz\" (UID: \"2d22e4bd-1186-43a6-bc4f-d313377c78f3\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ndmnz" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066394 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/750f36ae-2e78-4a6d-8e78-e315d507d436-installation-pull-secrets\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066419 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkrjz\" (UniqueName: \"kubernetes.io/projected/4637ec55-c9ee-48a4-9351-6a382efe4c91-kube-api-access-fkrjz\") pod \"marketplace-operator-79b997595-k884h\" (UID: \"4637ec55-c9ee-48a4-9351-6a382efe4c91\") " pod="openshift-marketplace/marketplace-operator-79b997595-k884h" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066450 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1135d11d-6ba1-46f6-b01d-0cb7529831e7-serving-cert\") pod \"service-ca-operator-777779d784-md284\" (UID: \"1135d11d-6ba1-46f6-b01d-0cb7529831e7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-md284" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066497 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20f8a1d6-8417-4cd3-9af7-b8e8f05f4c52-config\") pod \"kube-apiserver-operator-766d6c64bb-jbfln\" (UID: \"20f8a1d6-8417-4cd3-9af7-b8e8f05f4c52\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jbfln" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066559 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/efd3afce-d8bd-46c0-8bc6-ebace6984f16-etcd-client\") pod \"etcd-operator-b45778765-kln9b\" (UID: \"efd3afce-d8bd-46c0-8bc6-ebace6984f16\") " pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066590 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/157eb7c6-f1df-4336-8796-d100eda6102d-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-gkvs9\" (UID: \"157eb7c6-f1df-4336-8796-d100eda6102d\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gkvs9" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066625 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/75c9640f-f412-4f63-ac86-f4d665d57038-node-bootstrap-token\") pod \"machine-config-server-krwk4\" (UID: \"75c9640f-f412-4f63-ac86-f4d665d57038\") " pod="openshift-machine-config-operator/machine-config-server-krwk4" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066656 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nssgn\" (UniqueName: \"kubernetes.io/projected/f4787aef-1d33-49b7-b5c3-7b0404bab7f5-kube-api-access-nssgn\") pod \"openshift-controller-manager-operator-756b6f6bc6-blxxm\" (UID: \"f4787aef-1d33-49b7-b5c3-7b0404bab7f5\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-blxxm" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066682 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkkhz\" (UniqueName: \"kubernetes.io/projected/4f19e739-3cb7-4007-8fce-7bfb0f76e0a4-kube-api-access-zkkhz\") pod \"machine-config-controller-84d6567774-6kn6v\" (UID: 
\"4f19e739-3cb7-4007-8fce-7bfb0f76e0a4\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6kn6v" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066712 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/988022f9-58e3-429b-9940-657283113440-tmpfs\") pod \"packageserver-d55dfcdfc-g5ldp\" (UID: \"988022f9-58e3-429b-9940-657283113440\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066741 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j989k\" (UniqueName: \"kubernetes.io/projected/eded421c-1f8d-4719-ac97-30116f0eda31-kube-api-access-j989k\") pod \"dns-default-29j84\" (UID: \"eded421c-1f8d-4719-ac97-30116f0eda31\") " pod="openshift-dns/dns-default-29j84" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066773 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1135d11d-6ba1-46f6-b01d-0cb7529831e7-config\") pod \"service-ca-operator-777779d784-md284\" (UID: \"1135d11d-6ba1-46f6-b01d-0cb7529831e7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-md284" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066802 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6hg5\" (UniqueName: \"kubernetes.io/projected/750f36ae-2e78-4a6d-8e78-e315d507d436-kube-api-access-x6hg5\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066854 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/4f19e739-3cb7-4007-8fce-7bfb0f76e0a4-proxy-tls\") pod \"machine-config-controller-84d6567774-6kn6v\" (UID: \"4f19e739-3cb7-4007-8fce-7bfb0f76e0a4\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6kn6v" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066926 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7tbf\" (UniqueName: \"kubernetes.io/projected/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-kube-api-access-v7tbf\") pod \"console-f9d7485db-zqgfl\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066955 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/21cbf909-e064-465c-bb64-5b0d5c82d691-mountpoint-dir\") pod \"csi-hostpathplugin-pcqbh\" (UID: \"21cbf909-e064-465c-bb64-5b0d5c82d691\") " pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.066983 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4fk5g\" (UniqueName: \"kubernetes.io/projected/1988c73c-a04d-4b50-af92-54dfc2a4a262-kube-api-access-4fk5g\") pod \"control-plane-machine-set-operator-78cbb6b69f-4qp6m\" (UID: \"1988c73c-a04d-4b50-af92-54dfc2a4a262\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4qp6m" Nov 24 
13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.067030 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/988022f9-58e3-429b-9940-657283113440-webhook-cert\") pod \"packageserver-d55dfcdfc-g5ldp\" (UID: \"988022f9-58e3-429b-9940-657283113440\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.067100 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f4787aef-1d33-49b7-b5c3-7b0404bab7f5-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-blxxm\" (UID: \"f4787aef-1d33-49b7-b5c3-7b0404bab7f5\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-blxxm" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.067125 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqxxt\" (UniqueName: \"kubernetes.io/projected/21cbf909-e064-465c-bb64-5b0d5c82d691-kube-api-access-gqxxt\") pod \"csi-hostpathplugin-pcqbh\" (UID: \"21cbf909-e064-465c-bb64-5b0d5c82d691\") " pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.067209 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23c83b86-90ee-4d41-9362-feaba87dfc0c-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-8t5gh\" (UID: \"23c83b86-90ee-4d41-9362-feaba87dfc0c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8t5gh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.067278 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8mqr\" (UniqueName: \"kubernetes.io/projected/2c2dcfde-39a1-42d0-ba00-d18199782ba5-kube-api-access-l8mqr\") pod \"machine-config-operator-74547568cd-x9lf7\" (UID: \"2c2dcfde-39a1-42d0-ba00-d18199782ba5\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.067309 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/20f8a1d6-8417-4cd3-9af7-b8e8f05f4c52-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-jbfln\" (UID: \"20f8a1d6-8417-4cd3-9af7-b8e8f05f4c52\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jbfln" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.067340 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/157eb7c6-f1df-4336-8796-d100eda6102d-config\") pod \"kube-controller-manager-operator-78b949d7b-gkvs9\" (UID: \"157eb7c6-f1df-4336-8796-d100eda6102d\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gkvs9" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.067448 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4227\" (UniqueName: \"kubernetes.io/projected/23c83b86-90ee-4d41-9362-feaba87dfc0c-kube-api-access-r4227\") pod \"kube-storage-version-migrator-operator-b67b599dd-8t5gh\" (UID: 
\"23c83b86-90ee-4d41-9362-feaba87dfc0c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8t5gh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.067497 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/20f8a1d6-8417-4cd3-9af7-b8e8f05f4c52-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-jbfln\" (UID: \"20f8a1d6-8417-4cd3-9af7-b8e8f05f4c52\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jbfln" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.067557 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/17a9ee7f-16c3-40ce-b614-a30cee1e8d83-srv-cert\") pod \"olm-operator-6b444d44fb-5q4db\" (UID: \"17a9ee7f-16c3-40ce-b614-a30cee1e8d83\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.067602 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-console-oauth-config\") pod \"console-f9d7485db-zqgfl\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.067630 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4787aef-1d33-49b7-b5c3-7b0404bab7f5-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-blxxm\" (UID: \"f4787aef-1d33-49b7-b5c3-7b0404bab7f5\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-blxxm" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.067677 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8m6p\" (UniqueName: \"kubernetes.io/projected/efd3afce-d8bd-46c0-8bc6-ebace6984f16-kube-api-access-s8m6p\") pod \"etcd-operator-b45778765-kln9b\" (UID: \"efd3afce-d8bd-46c0-8bc6-ebace6984f16\") " pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.067725 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-service-ca\") pod \"console-f9d7485db-zqgfl\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.067794 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hb55z\" (UniqueName: \"kubernetes.io/projected/55fb48e8-0db2-4fe4-b3f8-1ede33b3a49d-kube-api-access-hb55z\") pod \"migrator-59844c95c7-2l986\" (UID: \"55fb48e8-0db2-4fe4-b3f8-1ede33b3a49d\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2l986" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.067819 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/792734d4-55a5-4a6f-be71-52a83b22c73f-metrics-tls\") pod \"dns-operator-744455d44c-k4227\" (UID: \"792734d4-55a5-4a6f-be71-52a83b22c73f\") " pod="openshift-dns-operator/dns-operator-744455d44c-k4227" Nov 24 13:20:31 crc kubenswrapper[5039]: 
I1124 13:20:31.067856 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/17a9ee7f-16c3-40ce-b614-a30cee1e8d83-profile-collector-cert\") pod \"olm-operator-6b444d44fb-5q4db\" (UID: \"17a9ee7f-16c3-40ce-b614-a30cee1e8d83\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.067885 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/5d53487c-6d56-443c-94d3-899cd8be9666-srv-cert\") pod \"catalog-operator-68c6474976-bmdnh\" (UID: \"5d53487c-6d56-443c-94d3-899cd8be9666\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.067981 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fm2tl\" (UniqueName: \"kubernetes.io/projected/6170f687-30e2-44b0-860e-ddcee4e4f2d4-kube-api-access-fm2tl\") pod \"collect-profiles-29399835-8xp54\" (UID: \"6170f687-30e2-44b0-860e-ddcee4e4f2d4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.068032 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/b74506b2-3cd6-4803-9a11-69fa714e570e-signing-cabundle\") pod \"service-ca-9c57cc56f-6gfhg\" (UID: \"b74506b2-3cd6-4803-9a11-69fa714e570e\") " pod="openshift-service-ca/service-ca-9c57cc56f-6gfhg" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.068059 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/2c2dcfde-39a1-42d0-ba00-d18199782ba5-proxy-tls\") pod \"machine-config-operator-74547568cd-x9lf7\" (UID: \"2c2dcfde-39a1-42d0-ba00-d18199782ba5\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.068092 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4637ec55-c9ee-48a4-9351-6a382efe4c91-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-k884h\" (UID: \"4637ec55-c9ee-48a4-9351-6a382efe4c91\") " pod="openshift-marketplace/marketplace-operator-79b997595-k884h" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.068120 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/efd3afce-d8bd-46c0-8bc6-ebace6984f16-etcd-ca\") pod \"etcd-operator-b45778765-kln9b\" (UID: \"efd3afce-d8bd-46c0-8bc6-ebace6984f16\") " pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.068165 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/750f36ae-2e78-4a6d-8e78-e315d507d436-registry-certificates\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.068193 5039 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-console-serving-cert\") pod \"console-f9d7485db-zqgfl\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.068220 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/efd3afce-d8bd-46c0-8bc6-ebace6984f16-etcd-service-ca\") pod \"etcd-operator-b45778765-kln9b\" (UID: \"efd3afce-d8bd-46c0-8bc6-ebace6984f16\") " pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.068247 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swxh5\" (UniqueName: \"kubernetes.io/projected/75c9640f-f412-4f63-ac86-f4d665d57038-kube-api-access-swxh5\") pod \"machine-config-server-krwk4\" (UID: \"75c9640f-f412-4f63-ac86-f4d665d57038\") " pod="openshift-machine-config-operator/machine-config-server-krwk4" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.068293 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/efd3afce-d8bd-46c0-8bc6-ebace6984f16-serving-cert\") pod \"etcd-operator-b45778765-kln9b\" (UID: \"efd3afce-d8bd-46c0-8bc6-ebace6984f16\") " pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.068355 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wg5d\" (UniqueName: \"kubernetes.io/projected/17a9ee7f-16c3-40ce-b614-a30cee1e8d83-kube-api-access-7wg5d\") pod \"olm-operator-6b444d44fb-5q4db\" (UID: \"17a9ee7f-16c3-40ce-b614-a30cee1e8d83\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.068377 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/21cbf909-e064-465c-bb64-5b0d5c82d691-socket-dir\") pod \"csi-hostpathplugin-pcqbh\" (UID: \"21cbf909-e064-465c-bb64-5b0d5c82d691\") " pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.068406 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjxgw\" (UniqueName: \"kubernetes.io/projected/a63b9811-8d87-42cf-9773-3145399ce2b6-kube-api-access-rjxgw\") pod \"ingress-canary-fzl9j\" (UID: \"a63b9811-8d87-42cf-9773-3145399ce2b6\") " pod="openshift-ingress-canary/ingress-canary-fzl9j" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.068461 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4637ec55-c9ee-48a4-9351-6a382efe4c91-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-k884h\" (UID: \"4637ec55-c9ee-48a4-9351-6a382efe4c91\") " pod="openshift-marketplace/marketplace-operator-79b997595-k884h" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.068490 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/988022f9-58e3-429b-9940-657283113440-apiservice-cert\") pod \"packageserver-d55dfcdfc-g5ldp\" (UID: \"988022f9-58e3-429b-9940-657283113440\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.068807 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a63b9811-8d87-42cf-9773-3145399ce2b6-cert\") pod \"ingress-canary-fzl9j\" (UID: \"a63b9811-8d87-42cf-9773-3145399ce2b6\") " pod="openshift-ingress-canary/ingress-canary-fzl9j" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.068903 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/b74506b2-3cd6-4803-9a11-69fa714e570e-signing-key\") pod \"service-ca-9c57cc56f-6gfhg\" (UID: \"b74506b2-3cd6-4803-9a11-69fa714e570e\") " pod="openshift-service-ca/service-ca-9c57cc56f-6gfhg" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.068954 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/5d53487c-6d56-443c-94d3-899cd8be9666-profile-collector-cert\") pod \"catalog-operator-68c6474976-bmdnh\" (UID: \"5d53487c-6d56-443c-94d3-899cd8be9666\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.069067 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/efd3afce-d8bd-46c0-8bc6-ebace6984f16-config\") pod \"etcd-operator-b45778765-kln9b\" (UID: \"efd3afce-d8bd-46c0-8bc6-ebace6984f16\") " pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.069100 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6170f687-30e2-44b0-860e-ddcee4e4f2d4-secret-volume\") pod \"collect-profiles-29399835-8xp54\" (UID: \"6170f687-30e2-44b0-860e-ddcee4e4f2d4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.069174 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-console-config\") pod \"console-f9d7485db-zqgfl\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.069205 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/21cbf909-e064-465c-bb64-5b0d5c82d691-csi-data-dir\") pod \"csi-hostpathplugin-pcqbh\" (UID: \"21cbf909-e064-465c-bb64-5b0d5c82d691\") " pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.069287 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/2c2dcfde-39a1-42d0-ba00-d18199782ba5-images\") pod \"machine-config-operator-74547568cd-x9lf7\" (UID: \"2c2dcfde-39a1-42d0-ba00-d18199782ba5\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7" Nov 24 
13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.069333 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/157eb7c6-f1df-4336-8796-d100eda6102d-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-gkvs9\" (UID: \"157eb7c6-f1df-4336-8796-d100eda6102d\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gkvs9" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.069364 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/750f36ae-2e78-4a6d-8e78-e315d507d436-registry-tls\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.069412 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/750f36ae-2e78-4a6d-8e78-e315d507d436-bound-sa-token\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.069461 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/750f36ae-2e78-4a6d-8e78-e315d507d436-trusted-ca\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.069486 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbqjh\" (UniqueName: \"kubernetes.io/projected/988022f9-58e3-429b-9940-657283113440-kube-api-access-vbqjh\") pod \"packageserver-d55dfcdfc-g5ldp\" (UID: \"988022f9-58e3-429b-9940-657283113440\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" Nov 24 13:20:31 crc kubenswrapper[5039]: E1124 13:20:31.069946 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:31.569921882 +0000 UTC m=+144.009046382 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.071753 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-57szn\" (UniqueName: \"kubernetes.io/projected/1135d11d-6ba1-46f6-b01d-0cb7529831e7-kube-api-access-57szn\") pod \"service-ca-operator-777779d784-md284\" (UID: \"1135d11d-6ba1-46f6-b01d-0cb7529831e7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-md284" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.073622 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/750f36ae-2e78-4a6d-8e78-e315d507d436-ca-trust-extracted\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.075413 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4f19e739-3cb7-4007-8fce-7bfb0f76e0a4-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-6kn6v\" (UID: \"4f19e739-3cb7-4007-8fce-7bfb0f76e0a4\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6kn6v" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.075522 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6170f687-30e2-44b0-860e-ddcee4e4f2d4-config-volume\") pod \"collect-profiles-29399835-8xp54\" (UID: \"6170f687-30e2-44b0-860e-ddcee4e4f2d4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.075568 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23c83b86-90ee-4d41-9362-feaba87dfc0c-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-8t5gh\" (UID: \"23c83b86-90ee-4d41-9362-feaba87dfc0c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8t5gh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.075609 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eded421c-1f8d-4719-ac97-30116f0eda31-config-volume\") pod \"dns-default-29j84\" (UID: \"eded421c-1f8d-4719-ac97-30116f0eda31\") " pod="openshift-dns/dns-default-29j84" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.075647 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2c2dcfde-39a1-42d0-ba00-d18199782ba5-auth-proxy-config\") pod \"machine-config-operator-74547568cd-x9lf7\" (UID: \"2c2dcfde-39a1-42d0-ba00-d18199782ba5\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7" Nov 24 
13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.075684 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-oauth-serving-cert\") pod \"console-f9d7485db-zqgfl\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.075724 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/21cbf909-e064-465c-bb64-5b0d5c82d691-plugins-dir\") pod \"csi-hostpathplugin-pcqbh\" (UID: \"21cbf909-e064-465c-bb64-5b0d5c82d691\") " pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.075760 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/2d22e4bd-1186-43a6-bc4f-d313377c78f3-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-ndmnz\" (UID: \"2d22e4bd-1186-43a6-bc4f-d313377c78f3\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ndmnz" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.076078 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-service-ca\") pod \"console-f9d7485db-zqgfl\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.076122 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/eded421c-1f8d-4719-ac97-30116f0eda31-metrics-tls\") pod \"dns-default-29j84\" (UID: \"eded421c-1f8d-4719-ac97-30116f0eda31\") " pod="openshift-dns/dns-default-29j84" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.079127 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/2c2dcfde-39a1-42d0-ba00-d18199782ba5-proxy-tls\") pod \"machine-config-operator-74547568cd-x9lf7\" (UID: \"2c2dcfde-39a1-42d0-ba00-d18199782ba5\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.079780 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mx8zv" event={"ID":"f1d33349-6b41-43a0-9aa9-03084435fd75","Type":"ContainerStarted","Data":"2ed2c60d8daa94b71492da5e9c72e20a6188d2b438d132a578bc7856f8c1da18"} Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.080019 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2c2dcfde-39a1-42d0-ba00-d18199782ba5-auth-proxy-config\") pod \"machine-config-operator-74547568cd-x9lf7\" (UID: \"2c2dcfde-39a1-42d0-ba00-d18199782ba5\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.081076 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-console-oauth-config\") pod \"console-f9d7485db-zqgfl\" (UID: 
\"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.081170 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/750f36ae-2e78-4a6d-8e78-e315d507d436-registry-tls\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.081874 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/2c2dcfde-39a1-42d0-ba00-d18199782ba5-images\") pod \"machine-config-operator-74547568cd-x9lf7\" (UID: \"2c2dcfde-39a1-42d0-ba00-d18199782ba5\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.082135 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-console-config\") pod \"console-f9d7485db-zqgfl\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.082882 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4787aef-1d33-49b7-b5c3-7b0404bab7f5-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-blxxm\" (UID: \"f4787aef-1d33-49b7-b5c3-7b0404bab7f5\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-blxxm" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.083466 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-oauth-serving-cert\") pod \"console-f9d7485db-zqgfl\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.083705 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20f8a1d6-8417-4cd3-9af7-b8e8f05f4c52-config\") pod \"kube-apiserver-operator-766d6c64bb-jbfln\" (UID: \"20f8a1d6-8417-4cd3-9af7-b8e8f05f4c52\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jbfln" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.083824 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/750f36ae-2e78-4a6d-8e78-e315d507d436-trusted-ca\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.083965 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/20f8a1d6-8417-4cd3-9af7-b8e8f05f4c52-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-jbfln\" (UID: \"20f8a1d6-8417-4cd3-9af7-b8e8f05f4c52\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jbfln" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.086638 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-trusted-ca-bundle\") pod \"console-f9d7485db-zqgfl\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.091846 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/750f36ae-2e78-4a6d-8e78-e315d507d436-registry-certificates\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.093706 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-4hh9n" event={"ID":"25637e2c-a1e3-4449-a549-7b081d0c4c4c","Type":"ContainerStarted","Data":"05fb2f77eeb0d345a4d9dfc4d7cd159a41dc3c486a20ed596310650f3e905e31"} Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.095889 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/792734d4-55a5-4a6f-be71-52a83b22c73f-metrics-tls\") pod \"dns-operator-744455d44c-k4227\" (UID: \"792734d4-55a5-4a6f-be71-52a83b22c73f\") " pod="openshift-dns-operator/dns-operator-744455d44c-k4227" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.097294 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-console-serving-cert\") pod \"console-f9d7485db-zqgfl\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.110241 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f4787aef-1d33-49b7-b5c3-7b0404bab7f5-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-blxxm\" (UID: \"f4787aef-1d33-49b7-b5c3-7b0404bab7f5\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-blxxm" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.113526 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-c886j" event={"ID":"a9b8d64c-e7f3-4751-865c-c162aab7badd","Type":"ContainerStarted","Data":"90a6f1d708eb2de784685ce648f2a8d26cb5e680bc0d7895b7340452633fd08e"} Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.117343 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/750f36ae-2e78-4a6d-8e78-e315d507d436-installation-pull-secrets\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.134237 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-98dp9" event={"ID":"c7032d1d-5aae-4e50-b10f-3df40a0cd983","Type":"ContainerStarted","Data":"be4e741e1ccc5c5d4353f637a3d458b95d1c4f92e5dfcc97d8c7a239004f5bf9"} Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.134317 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-98dp9" 
event={"ID":"c7032d1d-5aae-4e50-b10f-3df40a0cd983","Type":"ContainerStarted","Data":"39f8d22e4986eba73f23da7acba0e74de36395aa48f0cc85a79af51ccdcc0580"} Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.135723 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-98dp9" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.138850 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-56bpb" event={"ID":"e04521cd-f63c-40ba-a296-a34d7ce739d7","Type":"ContainerStarted","Data":"c8d718d375842690163002f4fccae8ca05b2e2da57688ada908ed0171d19dfcc"} Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.138968 5039 patch_prober.go:28] interesting pod/downloads-7954f5f757-98dp9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.139009 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-98dp9" podUID="c7032d1d-5aae-4e50-b10f-3df40a0cd983" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.141454 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6hg5\" (UniqueName: \"kubernetes.io/projected/750f36ae-2e78-4a6d-8e78-e315d507d436-kube-api-access-x6hg5\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.141925 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nssgn\" (UniqueName: \"kubernetes.io/projected/f4787aef-1d33-49b7-b5c3-7b0404bab7f5-kube-api-access-nssgn\") pod \"openshift-controller-manager-operator-756b6f6bc6-blxxm\" (UID: \"f4787aef-1d33-49b7-b5c3-7b0404bab7f5\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-blxxm" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.144815 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5dflw" event={"ID":"95afc2a3-7cfb-4a24-b555-5e8c0d21e044","Type":"ContainerStarted","Data":"aa73827c6c30a5e9677ea210abfacc6dd02e01c9812b2ff4a49b5930618fa28f"} Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.156952 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-blxxm" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.157734 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-n2dwh" event={"ID":"ae5ca663-7edb-49dd-a7a7-668eeace13f7","Type":"ContainerStarted","Data":"a7a0b848b8334685fc57c0a774748a03828b18b45d3d5ab3341b05309d95beb8"} Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.163663 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdksb\" (UniqueName: \"kubernetes.io/projected/792734d4-55a5-4a6f-be71-52a83b22c73f-kube-api-access-gdksb\") pod \"dns-operator-744455d44c-k4227\" (UID: \"792734d4-55a5-4a6f-be71-52a83b22c73f\") " pod="openshift-dns-operator/dns-operator-744455d44c-k4227" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.164597 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-t958r"] Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.174320 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7" event={"ID":"a6decbe9-edda-413b-b067-665ccf6efece","Type":"ContainerStarted","Data":"0ca39ffe9c5083956a5ed8faa8eb0c056cc2f0a50327923a03d561f4aa22fac1"} Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.176749 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z" event={"ID":"6e075f32-2803-4a2e-bf1a-0b1858adabf0","Type":"ContainerStarted","Data":"dc68d25e212f7dad86ff42d3b4419d8ea67ed64fb5afe236989113d126e7c14c"} Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.177703 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s48b6\" (UniqueName: \"kubernetes.io/projected/5d53487c-6d56-443c-94d3-899cd8be9666-kube-api-access-s48b6\") pod \"catalog-operator-68c6474976-bmdnh\" (UID: \"5d53487c-6d56-443c-94d3-899cd8be9666\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.177766 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/21cbf909-e064-465c-bb64-5b0d5c82d691-registration-dir\") pod \"csi-hostpathplugin-pcqbh\" (UID: \"21cbf909-e064-465c-bb64-5b0d5c82d691\") " pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.177815 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spd47\" (UniqueName: \"kubernetes.io/projected/b74506b2-3cd6-4803-9a11-69fa714e570e-kube-api-access-spd47\") pod \"service-ca-9c57cc56f-6gfhg\" (UID: \"b74506b2-3cd6-4803-9a11-69fa714e570e\") " pod="openshift-service-ca/service-ca-9c57cc56f-6gfhg" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.177846 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/1988c73c-a04d-4b50-af92-54dfc2a4a262-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4qp6m\" (UID: \"1988c73c-a04d-4b50-af92-54dfc2a4a262\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4qp6m" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 
13:20:31.177874 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/75c9640f-f412-4f63-ac86-f4d665d57038-certs\") pod \"machine-config-server-krwk4\" (UID: \"75c9640f-f412-4f63-ac86-f4d665d57038\") " pod="openshift-machine-config-operator/machine-config-server-krwk4" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.177906 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjtwg\" (UniqueName: \"kubernetes.io/projected/2d22e4bd-1186-43a6-bc4f-d313377c78f3-kube-api-access-hjtwg\") pod \"package-server-manager-789f6589d5-ndmnz\" (UID: \"2d22e4bd-1186-43a6-bc4f-d313377c78f3\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ndmnz" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.177937 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.177967 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkrjz\" (UniqueName: \"kubernetes.io/projected/4637ec55-c9ee-48a4-9351-6a382efe4c91-kube-api-access-fkrjz\") pod \"marketplace-operator-79b997595-k884h\" (UID: \"4637ec55-c9ee-48a4-9351-6a382efe4c91\") " pod="openshift-marketplace/marketplace-operator-79b997595-k884h" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.177991 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1135d11d-6ba1-46f6-b01d-0cb7529831e7-serving-cert\") pod \"service-ca-operator-777779d784-md284\" (UID: \"1135d11d-6ba1-46f6-b01d-0cb7529831e7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-md284" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178014 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/efd3afce-d8bd-46c0-8bc6-ebace6984f16-etcd-client\") pod \"etcd-operator-b45778765-kln9b\" (UID: \"efd3afce-d8bd-46c0-8bc6-ebace6984f16\") " pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178040 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/157eb7c6-f1df-4336-8796-d100eda6102d-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-gkvs9\" (UID: \"157eb7c6-f1df-4336-8796-d100eda6102d\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gkvs9" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178063 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkkhz\" (UniqueName: \"kubernetes.io/projected/4f19e739-3cb7-4007-8fce-7bfb0f76e0a4-kube-api-access-zkkhz\") pod \"machine-config-controller-84d6567774-6kn6v\" (UID: \"4f19e739-3cb7-4007-8fce-7bfb0f76e0a4\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6kn6v" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178087 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/988022f9-58e3-429b-9940-657283113440-tmpfs\") pod \"packageserver-d55dfcdfc-g5ldp\" (UID: \"988022f9-58e3-429b-9940-657283113440\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178107 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/75c9640f-f412-4f63-ac86-f4d665d57038-node-bootstrap-token\") pod \"machine-config-server-krwk4\" (UID: \"75c9640f-f412-4f63-ac86-f4d665d57038\") " pod="openshift-machine-config-operator/machine-config-server-krwk4" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178142 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j989k\" (UniqueName: \"kubernetes.io/projected/eded421c-1f8d-4719-ac97-30116f0eda31-kube-api-access-j989k\") pod \"dns-default-29j84\" (UID: \"eded421c-1f8d-4719-ac97-30116f0eda31\") " pod="openshift-dns/dns-default-29j84" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178167 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1135d11d-6ba1-46f6-b01d-0cb7529831e7-config\") pod \"service-ca-operator-777779d784-md284\" (UID: \"1135d11d-6ba1-46f6-b01d-0cb7529831e7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-md284" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178190 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/4f19e739-3cb7-4007-8fce-7bfb0f76e0a4-proxy-tls\") pod \"machine-config-controller-84d6567774-6kn6v\" (UID: \"4f19e739-3cb7-4007-8fce-7bfb0f76e0a4\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6kn6v" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178213 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/21cbf909-e064-465c-bb64-5b0d5c82d691-mountpoint-dir\") pod \"csi-hostpathplugin-pcqbh\" (UID: \"21cbf909-e064-465c-bb64-5b0d5c82d691\") " pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178236 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4fk5g\" (UniqueName: \"kubernetes.io/projected/1988c73c-a04d-4b50-af92-54dfc2a4a262-kube-api-access-4fk5g\") pod \"control-plane-machine-set-operator-78cbb6b69f-4qp6m\" (UID: \"1988c73c-a04d-4b50-af92-54dfc2a4a262\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4qp6m" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178266 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/988022f9-58e3-429b-9940-657283113440-webhook-cert\") pod \"packageserver-d55dfcdfc-g5ldp\" (UID: \"988022f9-58e3-429b-9940-657283113440\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178292 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqxxt\" (UniqueName: \"kubernetes.io/projected/21cbf909-e064-465c-bb64-5b0d5c82d691-kube-api-access-gqxxt\") pod \"csi-hostpathplugin-pcqbh\" (UID: \"21cbf909-e064-465c-bb64-5b0d5c82d691\") " 
pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178346 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23c83b86-90ee-4d41-9362-feaba87dfc0c-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-8t5gh\" (UID: \"23c83b86-90ee-4d41-9362-feaba87dfc0c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8t5gh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178368 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/157eb7c6-f1df-4336-8796-d100eda6102d-config\") pod \"kube-controller-manager-operator-78b949d7b-gkvs9\" (UID: \"157eb7c6-f1df-4336-8796-d100eda6102d\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gkvs9" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178411 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4227\" (UniqueName: \"kubernetes.io/projected/23c83b86-90ee-4d41-9362-feaba87dfc0c-kube-api-access-r4227\") pod \"kube-storage-version-migrator-operator-b67b599dd-8t5gh\" (UID: \"23c83b86-90ee-4d41-9362-feaba87dfc0c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8t5gh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178451 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/17a9ee7f-16c3-40ce-b614-a30cee1e8d83-srv-cert\") pod \"olm-operator-6b444d44fb-5q4db\" (UID: \"17a9ee7f-16c3-40ce-b614-a30cee1e8d83\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178473 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/21cbf909-e064-465c-bb64-5b0d5c82d691-registration-dir\") pod \"csi-hostpathplugin-pcqbh\" (UID: \"21cbf909-e064-465c-bb64-5b0d5c82d691\") " pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178486 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8m6p\" (UniqueName: \"kubernetes.io/projected/efd3afce-d8bd-46c0-8bc6-ebace6984f16-kube-api-access-s8m6p\") pod \"etcd-operator-b45778765-kln9b\" (UID: \"efd3afce-d8bd-46c0-8bc6-ebace6984f16\") " pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178544 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hb55z\" (UniqueName: \"kubernetes.io/projected/55fb48e8-0db2-4fe4-b3f8-1ede33b3a49d-kube-api-access-hb55z\") pod \"migrator-59844c95c7-2l986\" (UID: \"55fb48e8-0db2-4fe4-b3f8-1ede33b3a49d\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2l986" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178563 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/17a9ee7f-16c3-40ce-b614-a30cee1e8d83-profile-collector-cert\") pod \"olm-operator-6b444d44fb-5q4db\" (UID: \"17a9ee7f-16c3-40ce-b614-a30cee1e8d83\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db" Nov 24 13:20:31 crc 
kubenswrapper[5039]: I1124 13:20:31.178615 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/5d53487c-6d56-443c-94d3-899cd8be9666-srv-cert\") pod \"catalog-operator-68c6474976-bmdnh\" (UID: \"5d53487c-6d56-443c-94d3-899cd8be9666\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178676 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fm2tl\" (UniqueName: \"kubernetes.io/projected/6170f687-30e2-44b0-860e-ddcee4e4f2d4-kube-api-access-fm2tl\") pod \"collect-profiles-29399835-8xp54\" (UID: \"6170f687-30e2-44b0-860e-ddcee4e4f2d4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178712 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4637ec55-c9ee-48a4-9351-6a382efe4c91-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-k884h\" (UID: \"4637ec55-c9ee-48a4-9351-6a382efe4c91\") " pod="openshift-marketplace/marketplace-operator-79b997595-k884h" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178731 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/efd3afce-d8bd-46c0-8bc6-ebace6984f16-etcd-ca\") pod \"etcd-operator-b45778765-kln9b\" (UID: \"efd3afce-d8bd-46c0-8bc6-ebace6984f16\") " pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178748 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/b74506b2-3cd6-4803-9a11-69fa714e570e-signing-cabundle\") pod \"service-ca-9c57cc56f-6gfhg\" (UID: \"b74506b2-3cd6-4803-9a11-69fa714e570e\") " pod="openshift-service-ca/service-ca-9c57cc56f-6gfhg" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178773 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swxh5\" (UniqueName: \"kubernetes.io/projected/75c9640f-f412-4f63-ac86-f4d665d57038-kube-api-access-swxh5\") pod \"machine-config-server-krwk4\" (UID: \"75c9640f-f412-4f63-ac86-f4d665d57038\") " pod="openshift-machine-config-operator/machine-config-server-krwk4" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178797 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/efd3afce-d8bd-46c0-8bc6-ebace6984f16-etcd-service-ca\") pod \"etcd-operator-b45778765-kln9b\" (UID: \"efd3afce-d8bd-46c0-8bc6-ebace6984f16\") " pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178815 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/efd3afce-d8bd-46c0-8bc6-ebace6984f16-serving-cert\") pod \"etcd-operator-b45778765-kln9b\" (UID: \"efd3afce-d8bd-46c0-8bc6-ebace6984f16\") " pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178840 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wg5d\" (UniqueName: 
\"kubernetes.io/projected/17a9ee7f-16c3-40ce-b614-a30cee1e8d83-kube-api-access-7wg5d\") pod \"olm-operator-6b444d44fb-5q4db\" (UID: \"17a9ee7f-16c3-40ce-b614-a30cee1e8d83\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178858 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/21cbf909-e064-465c-bb64-5b0d5c82d691-socket-dir\") pod \"csi-hostpathplugin-pcqbh\" (UID: \"21cbf909-e064-465c-bb64-5b0d5c82d691\") " pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178884 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjxgw\" (UniqueName: \"kubernetes.io/projected/a63b9811-8d87-42cf-9773-3145399ce2b6-kube-api-access-rjxgw\") pod \"ingress-canary-fzl9j\" (UID: \"a63b9811-8d87-42cf-9773-3145399ce2b6\") " pod="openshift-ingress-canary/ingress-canary-fzl9j" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178906 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4637ec55-c9ee-48a4-9351-6a382efe4c91-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-k884h\" (UID: \"4637ec55-c9ee-48a4-9351-6a382efe4c91\") " pod="openshift-marketplace/marketplace-operator-79b997595-k884h" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178930 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/988022f9-58e3-429b-9940-657283113440-apiservice-cert\") pod \"packageserver-d55dfcdfc-g5ldp\" (UID: \"988022f9-58e3-429b-9940-657283113440\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178945 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a63b9811-8d87-42cf-9773-3145399ce2b6-cert\") pod \"ingress-canary-fzl9j\" (UID: \"a63b9811-8d87-42cf-9773-3145399ce2b6\") " pod="openshift-ingress-canary/ingress-canary-fzl9j" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178974 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/5d53487c-6d56-443c-94d3-899cd8be9666-profile-collector-cert\") pod \"catalog-operator-68c6474976-bmdnh\" (UID: \"5d53487c-6d56-443c-94d3-899cd8be9666\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.178992 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/b74506b2-3cd6-4803-9a11-69fa714e570e-signing-key\") pod \"service-ca-9c57cc56f-6gfhg\" (UID: \"b74506b2-3cd6-4803-9a11-69fa714e570e\") " pod="openshift-service-ca/service-ca-9c57cc56f-6gfhg" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.179027 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/efd3afce-d8bd-46c0-8bc6-ebace6984f16-config\") pod \"etcd-operator-b45778765-kln9b\" (UID: \"efd3afce-d8bd-46c0-8bc6-ebace6984f16\") " pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.179044 5039 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6170f687-30e2-44b0-860e-ddcee4e4f2d4-secret-volume\") pod \"collect-profiles-29399835-8xp54\" (UID: \"6170f687-30e2-44b0-860e-ddcee4e4f2d4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.179095 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/21cbf909-e064-465c-bb64-5b0d5c82d691-csi-data-dir\") pod \"csi-hostpathplugin-pcqbh\" (UID: \"21cbf909-e064-465c-bb64-5b0d5c82d691\") " pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.179123 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p"] Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.179136 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/157eb7c6-f1df-4336-8796-d100eda6102d-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-gkvs9\" (UID: \"157eb7c6-f1df-4336-8796-d100eda6102d\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gkvs9" Nov 24 13:20:31 crc kubenswrapper[5039]: E1124 13:20:31.179453 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:31.679434487 +0000 UTC m=+144.118559207 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.180804 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/988022f9-58e3-429b-9940-657283113440-tmpfs\") pod \"packageserver-d55dfcdfc-g5ldp\" (UID: \"988022f9-58e3-429b-9940-657283113440\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.182543 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/efd3afce-d8bd-46c0-8bc6-ebace6984f16-etcd-service-ca\") pod \"etcd-operator-b45778765-kln9b\" (UID: \"efd3afce-d8bd-46c0-8bc6-ebace6984f16\") " pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.182646 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/21cbf909-e064-465c-bb64-5b0d5c82d691-socket-dir\") pod \"csi-hostpathplugin-pcqbh\" (UID: \"21cbf909-e064-465c-bb64-5b0d5c82d691\") " pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.183203 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/1135d11d-6ba1-46f6-b01d-0cb7529831e7-config\") pod \"service-ca-operator-777779d784-md284\" (UID: \"1135d11d-6ba1-46f6-b01d-0cb7529831e7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-md284" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.183542 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/21cbf909-e064-465c-bb64-5b0d5c82d691-mountpoint-dir\") pod \"csi-hostpathplugin-pcqbh\" (UID: \"21cbf909-e064-465c-bb64-5b0d5c82d691\") " pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.188132 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/75c9640f-f412-4f63-ac86-f4d665d57038-node-bootstrap-token\") pod \"machine-config-server-krwk4\" (UID: \"75c9640f-f412-4f63-ac86-f4d665d57038\") " pod="openshift-machine-config-operator/machine-config-server-krwk4" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.189489 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/4f19e739-3cb7-4007-8fce-7bfb0f76e0a4-proxy-tls\") pod \"machine-config-controller-84d6567774-6kn6v\" (UID: \"4f19e739-3cb7-4007-8fce-7bfb0f76e0a4\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6kn6v" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.189648 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbqjh\" (UniqueName: \"kubernetes.io/projected/988022f9-58e3-429b-9940-657283113440-kube-api-access-vbqjh\") pod \"packageserver-d55dfcdfc-g5ldp\" (UID: \"988022f9-58e3-429b-9940-657283113440\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.189689 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4f19e739-3cb7-4007-8fce-7bfb0f76e0a4-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-6kn6v\" (UID: \"4f19e739-3cb7-4007-8fce-7bfb0f76e0a4\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6kn6v" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.189868 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-k4227" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.189868 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/efd3afce-d8bd-46c0-8bc6-ebace6984f16-etcd-ca\") pod \"etcd-operator-b45778765-kln9b\" (UID: \"efd3afce-d8bd-46c0-8bc6-ebace6984f16\") " pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.189973 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-57szn\" (UniqueName: \"kubernetes.io/projected/1135d11d-6ba1-46f6-b01d-0cb7529831e7-kube-api-access-57szn\") pod \"service-ca-operator-777779d784-md284\" (UID: \"1135d11d-6ba1-46f6-b01d-0cb7529831e7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-md284" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.190001 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6170f687-30e2-44b0-860e-ddcee4e4f2d4-config-volume\") pod \"collect-profiles-29399835-8xp54\" (UID: \"6170f687-30e2-44b0-860e-ddcee4e4f2d4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.190026 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23c83b86-90ee-4d41-9362-feaba87dfc0c-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-8t5gh\" (UID: \"23c83b86-90ee-4d41-9362-feaba87dfc0c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8t5gh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.190048 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eded421c-1f8d-4719-ac97-30116f0eda31-config-volume\") pod \"dns-default-29j84\" (UID: \"eded421c-1f8d-4719-ac97-30116f0eda31\") " pod="openshift-dns/dns-default-29j84" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.190057 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7tbf\" (UniqueName: \"kubernetes.io/projected/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-kube-api-access-v7tbf\") pod \"console-f9d7485db-zqgfl\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") " pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.190066 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/eded421c-1f8d-4719-ac97-30116f0eda31-metrics-tls\") pod \"dns-default-29j84\" (UID: \"eded421c-1f8d-4719-ac97-30116f0eda31\") " pod="openshift-dns/dns-default-29j84" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.190168 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/21cbf909-e064-465c-bb64-5b0d5c82d691-plugins-dir\") pod \"csi-hostpathplugin-pcqbh\" (UID: \"21cbf909-e064-465c-bb64-5b0d5c82d691\") " pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.190250 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/efd3afce-d8bd-46c0-8bc6-ebace6984f16-etcd-client\") pod 
\"etcd-operator-b45778765-kln9b\" (UID: \"efd3afce-d8bd-46c0-8bc6-ebace6984f16\") " pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.190222 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/2d22e4bd-1186-43a6-bc4f-d313377c78f3-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-ndmnz\" (UID: \"2d22e4bd-1186-43a6-bc4f-d313377c78f3\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ndmnz" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.190376 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/21cbf909-e064-465c-bb64-5b0d5c82d691-plugins-dir\") pod \"csi-hostpathplugin-pcqbh\" (UID: \"21cbf909-e064-465c-bb64-5b0d5c82d691\") " pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.191123 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/5d53487c-6d56-443c-94d3-899cd8be9666-srv-cert\") pod \"catalog-operator-68c6474976-bmdnh\" (UID: \"5d53487c-6d56-443c-94d3-899cd8be9666\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.193321 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6170f687-30e2-44b0-860e-ddcee4e4f2d4-config-volume\") pod \"collect-profiles-29399835-8xp54\" (UID: \"6170f687-30e2-44b0-860e-ddcee4e4f2d4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.193668 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4f19e739-3cb7-4007-8fce-7bfb0f76e0a4-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-6kn6v\" (UID: \"4f19e739-3cb7-4007-8fce-7bfb0f76e0a4\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6kn6v" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.194685 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eded421c-1f8d-4719-ac97-30116f0eda31-config-volume\") pod \"dns-default-29j84\" (UID: \"eded421c-1f8d-4719-ac97-30116f0eda31\") " pod="openshift-dns/dns-default-29j84" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.195494 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23c83b86-90ee-4d41-9362-feaba87dfc0c-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-8t5gh\" (UID: \"23c83b86-90ee-4d41-9362-feaba87dfc0c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8t5gh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.195820 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4637ec55-c9ee-48a4-9351-6a382efe4c91-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-k884h\" (UID: \"4637ec55-c9ee-48a4-9351-6a382efe4c91\") " pod="openshift-marketplace/marketplace-operator-79b997595-k884h" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 
13:20:31.196147 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/21cbf909-e064-465c-bb64-5b0d5c82d691-csi-data-dir\") pod \"csi-hostpathplugin-pcqbh\" (UID: \"21cbf909-e064-465c-bb64-5b0d5c82d691\") " pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.197285 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/75c9640f-f412-4f63-ac86-f4d665d57038-certs\") pod \"machine-config-server-krwk4\" (UID: \"75c9640f-f412-4f63-ac86-f4d665d57038\") " pod="openshift-machine-config-operator/machine-config-server-krwk4" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.197976 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4637ec55-c9ee-48a4-9351-6a382efe4c91-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-k884h\" (UID: \"4637ec55-c9ee-48a4-9351-6a382efe4c91\") " pod="openshift-marketplace/marketplace-operator-79b997595-k884h" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.199173 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/750f36ae-2e78-4a6d-8e78-e315d507d436-bound-sa-token\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.199217 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/17a9ee7f-16c3-40ce-b614-a30cee1e8d83-srv-cert\") pod \"olm-operator-6b444d44fb-5q4db\" (UID: \"17a9ee7f-16c3-40ce-b614-a30cee1e8d83\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.204669 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/157eb7c6-f1df-4336-8796-d100eda6102d-config\") pod \"kube-controller-manager-operator-78b949d7b-gkvs9\" (UID: \"157eb7c6-f1df-4336-8796-d100eda6102d\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gkvs9" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.204797 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/efd3afce-d8bd-46c0-8bc6-ebace6984f16-serving-cert\") pod \"etcd-operator-b45778765-kln9b\" (UID: \"efd3afce-d8bd-46c0-8bc6-ebace6984f16\") " pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.204848 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/b74506b2-3cd6-4803-9a11-69fa714e570e-signing-cabundle\") pod \"service-ca-9c57cc56f-6gfhg\" (UID: \"b74506b2-3cd6-4803-9a11-69fa714e570e\") " pod="openshift-service-ca/service-ca-9c57cc56f-6gfhg" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.205021 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/5d53487c-6d56-443c-94d3-899cd8be9666-profile-collector-cert\") pod \"catalog-operator-68c6474976-bmdnh\" (UID: \"5d53487c-6d56-443c-94d3-899cd8be9666\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.205268 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/1988c73c-a04d-4b50-af92-54dfc2a4a262-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4qp6m\" (UID: \"1988c73c-a04d-4b50-af92-54dfc2a4a262\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4qp6m" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.205721 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/157eb7c6-f1df-4336-8796-d100eda6102d-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-gkvs9\" (UID: \"157eb7c6-f1df-4336-8796-d100eda6102d\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gkvs9" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.205865 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/b74506b2-3cd6-4803-9a11-69fa714e570e-signing-key\") pod \"service-ca-9c57cc56f-6gfhg\" (UID: \"b74506b2-3cd6-4803-9a11-69fa714e570e\") " pod="openshift-service-ca/service-ca-9c57cc56f-6gfhg" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.205891 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/988022f9-58e3-429b-9940-657283113440-webhook-cert\") pod \"packageserver-d55dfcdfc-g5ldp\" (UID: \"988022f9-58e3-429b-9940-657283113440\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.206722 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/eded421c-1f8d-4719-ac97-30116f0eda31-metrics-tls\") pod \"dns-default-29j84\" (UID: \"eded421c-1f8d-4719-ac97-30116f0eda31\") " pod="openshift-dns/dns-default-29j84" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.207540 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23c83b86-90ee-4d41-9362-feaba87dfc0c-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-8t5gh\" (UID: \"23c83b86-90ee-4d41-9362-feaba87dfc0c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8t5gh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.210167 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/988022f9-58e3-429b-9940-657283113440-apiservice-cert\") pod \"packageserver-d55dfcdfc-g5ldp\" (UID: \"988022f9-58e3-429b-9940-657283113440\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.210785 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/17a9ee7f-16c3-40ce-b614-a30cee1e8d83-profile-collector-cert\") pod \"olm-operator-6b444d44fb-5q4db\" (UID: \"17a9ee7f-16c3-40ce-b614-a30cee1e8d83\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.210989 5039 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/efd3afce-d8bd-46c0-8bc6-ebace6984f16-config\") pod \"etcd-operator-b45778765-kln9b\" (UID: \"efd3afce-d8bd-46c0-8bc6-ebace6984f16\") " pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.211357 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1135d11d-6ba1-46f6-b01d-0cb7529831e7-serving-cert\") pod \"service-ca-operator-777779d784-md284\" (UID: \"1135d11d-6ba1-46f6-b01d-0cb7529831e7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-md284" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.211926 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/20f8a1d6-8417-4cd3-9af7-b8e8f05f4c52-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-jbfln\" (UID: \"20f8a1d6-8417-4cd3-9af7-b8e8f05f4c52\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jbfln" Nov 24 13:20:31 crc kubenswrapper[5039]: W1124 13:20:31.211996 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f7a5c45_edc8_443e_9730_f7a2eb5ab116.slice/crio-f2589831f90d9d476d2bbb3b21d932328fe04f1468b1cd506937b9174ac760f3 WatchSource:0}: Error finding container f2589831f90d9d476d2bbb3b21d932328fe04f1468b1cd506937b9174ac760f3: Status 404 returned error can't find the container with id f2589831f90d9d476d2bbb3b21d932328fe04f1468b1cd506937b9174ac760f3 Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.218130 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6170f687-30e2-44b0-860e-ddcee4e4f2d4-secret-volume\") pod \"collect-profiles-29399835-8xp54\" (UID: \"6170f687-30e2-44b0-860e-ddcee4e4f2d4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.219584 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-b8b2f"] Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.224915 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jbfln" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.225305 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/2d22e4bd-1186-43a6-bc4f-d313377c78f3-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-ndmnz\" (UID: \"2d22e4bd-1186-43a6-bc4f-d313377c78f3\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ndmnz" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.230171 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a63b9811-8d87-42cf-9773-3145399ce2b6-cert\") pod \"ingress-canary-fzl9j\" (UID: \"a63b9811-8d87-42cf-9773-3145399ce2b6\") " pod="openshift-ingress-canary/ingress-canary-fzl9j" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.239564 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8mqr\" (UniqueName: \"kubernetes.io/projected/2c2dcfde-39a1-42d0-ba00-d18199782ba5-kube-api-access-l8mqr\") pod \"machine-config-operator-74547568cd-x9lf7\" (UID: \"2c2dcfde-39a1-42d0-ba00-d18199782ba5\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7" Nov 24 13:20:31 crc kubenswrapper[5039]: W1124 13:20:31.255703 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb073719c_394b_496f_9d64_75681184acb0.slice/crio-0366868a7747af71dafe199edf7949c6e099b4d86f888f34fbc62d142c8a8c06 WatchSource:0}: Error finding container 0366868a7747af71dafe199edf7949c6e099b4d86f888f34fbc62d142c8a8c06: Status 404 returned error can't find the container with id 0366868a7747af71dafe199edf7949c6e099b4d86f888f34fbc62d142c8a8c06 Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.274893 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s48b6\" (UniqueName: \"kubernetes.io/projected/5d53487c-6d56-443c-94d3-899cd8be9666-kube-api-access-s48b6\") pod \"catalog-operator-68c6474976-bmdnh\" (UID: \"5d53487c-6d56-443c-94d3-899cd8be9666\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.283092 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.295172 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spd47\" (UniqueName: \"kubernetes.io/projected/b74506b2-3cd6-4803-9a11-69fa714e570e-kube-api-access-spd47\") pod \"service-ca-9c57cc56f-6gfhg\" (UID: \"b74506b2-3cd6-4803-9a11-69fa714e570e\") " pod="openshift-service-ca/service-ca-9c57cc56f-6gfhg" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.295586 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:31 crc kubenswrapper[5039]: E1124 13:20:31.295888 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:31.795855631 +0000 UTC m=+144.234980131 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.296125 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:31 crc kubenswrapper[5039]: E1124 13:20:31.296584 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:31.79657713 +0000 UTC m=+144.235701630 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.311355 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-6gfhg" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.316587 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjtwg\" (UniqueName: \"kubernetes.io/projected/2d22e4bd-1186-43a6-bc4f-d313377c78f3-kube-api-access-hjtwg\") pod \"package-server-manager-789f6589d5-ndmnz\" (UID: \"2d22e4bd-1186-43a6-bc4f-d313377c78f3\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ndmnz" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.334180 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-zf42k"] Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.344579 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkrjz\" (UniqueName: \"kubernetes.io/projected/4637ec55-c9ee-48a4-9351-6a382efe4c91-kube-api-access-fkrjz\") pod \"marketplace-operator-79b997595-k884h\" (UID: \"4637ec55-c9ee-48a4-9351-6a382efe4c91\") " pod="openshift-marketplace/marketplace-operator-79b997595-k884h" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.350658 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2"] Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.351005 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ndmnz" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.351924 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xgtj9"] Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.372762 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4227\" (UniqueName: \"kubernetes.io/projected/23c83b86-90ee-4d41-9362-feaba87dfc0c-kube-api-access-r4227\") pod \"kube-storage-version-migrator-operator-b67b599dd-8t5gh\" (UID: \"23c83b86-90ee-4d41-9362-feaba87dfc0c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8t5gh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.379849 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4fk5g\" (UniqueName: \"kubernetes.io/projected/1988c73c-a04d-4b50-af92-54dfc2a4a262-kube-api-access-4fk5g\") pod \"control-plane-machine-set-operator-78cbb6b69f-4qp6m\" (UID: \"1988c73c-a04d-4b50-af92-54dfc2a4a262\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4qp6m" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.396987 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:31 crc kubenswrapper[5039]: E1124 13:20:31.397453 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:31.897437192 +0000 UTC m=+144.336561692 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.399942 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.402410 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/157eb7c6-f1df-4336-8796-d100eda6102d-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-gkvs9\" (UID: \"157eb7c6-f1df-4336-8796-d100eda6102d\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gkvs9" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.428213 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkkhz\" (UniqueName: \"kubernetes.io/projected/4f19e739-3cb7-4007-8fce-7bfb0f76e0a4-kube-api-access-zkkhz\") pod \"machine-config-controller-84d6567774-6kn6v\" (UID: \"4f19e739-3cb7-4007-8fce-7bfb0f76e0a4\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6kn6v" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.431990 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-blxxm"] Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.486088 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-k4227"] Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.487453 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j989k\" (UniqueName: \"kubernetes.io/projected/eded421c-1f8d-4719-ac97-30116f0eda31-kube-api-access-j989k\") pod \"dns-default-29j84\" (UID: \"eded421c-1f8d-4719-ac97-30116f0eda31\") " pod="openshift-dns/dns-default-29j84" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.487964 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqxxt\" (UniqueName: \"kubernetes.io/projected/21cbf909-e064-465c-bb64-5b0d5c82d691-kube-api-access-gqxxt\") pod \"csi-hostpathplugin-pcqbh\" (UID: \"21cbf909-e064-465c-bb64-5b0d5c82d691\") " pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.493732 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fm2tl\" (UniqueName: \"kubernetes.io/projected/6170f687-30e2-44b0-860e-ddcee4e4f2d4-kube-api-access-fm2tl\") pod \"collect-profiles-29399835-8xp54\" (UID: \"6170f687-30e2-44b0-860e-ddcee4e4f2d4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.498934 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:31 crc kubenswrapper[5039]: E1124 13:20:31.499210 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:31.999199158 +0000 UTC m=+144.438323658 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.515480 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wg5d\" (UniqueName: \"kubernetes.io/projected/17a9ee7f-16c3-40ce-b614-a30cee1e8d83-kube-api-access-7wg5d\") pod \"olm-operator-6b444d44fb-5q4db\" (UID: \"17a9ee7f-16c3-40ce-b614-a30cee1e8d83\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.531874 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.533573 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swxh5\" (UniqueName: \"kubernetes.io/projected/75c9640f-f412-4f63-ac86-f4d665d57038-kube-api-access-swxh5\") pod \"machine-config-server-krwk4\" (UID: \"75c9640f-f412-4f63-ac86-f4d665d57038\") " pod="openshift-machine-config-operator/machine-config-server-krwk4" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.542218 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbqjh\" (UniqueName: \"kubernetes.io/projected/988022f9-58e3-429b-9940-657283113440-kube-api-access-vbqjh\") pod \"packageserver-d55dfcdfc-g5ldp\" (UID: \"988022f9-58e3-429b-9940-657283113440\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.549470 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6kn6v" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.558002 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-57szn\" (UniqueName: \"kubernetes.io/projected/1135d11d-6ba1-46f6-b01d-0cb7529831e7-kube-api-access-57szn\") pod \"service-ca-operator-777779d784-md284\" (UID: \"1135d11d-6ba1-46f6-b01d-0cb7529831e7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-md284" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.570766 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gkvs9" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.576006 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8m6p\" (UniqueName: \"kubernetes.io/projected/efd3afce-d8bd-46c0-8bc6-ebace6984f16-kube-api-access-s8m6p\") pod \"etcd-operator-b45778765-kln9b\" (UID: \"efd3afce-d8bd-46c0-8bc6-ebace6984f16\") " pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.577054 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-k884h" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.591118 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8t5gh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.593694 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hb55z\" (UniqueName: \"kubernetes.io/projected/55fb48e8-0db2-4fe4-b3f8-1ede33b3a49d-kube-api-access-hb55z\") pod \"migrator-59844c95c7-2l986\" (UID: \"55fb48e8-0db2-4fe4-b3f8-1ede33b3a49d\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2l986" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.596440 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.599360 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:31 crc kubenswrapper[5039]: E1124 13:20:31.599981 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:32.099932786 +0000 UTC m=+144.539057286 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.603462 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-md284" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.616228 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjxgw\" (UniqueName: \"kubernetes.io/projected/a63b9811-8d87-42cf-9773-3145399ce2b6-kube-api-access-rjxgw\") pod \"ingress-canary-fzl9j\" (UID: \"a63b9811-8d87-42cf-9773-3145399ce2b6\") " pod="openshift-ingress-canary/ingress-canary-fzl9j" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.618743 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-krwk4" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.624864 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.632767 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4qp6m" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.641343 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.656489 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-29j84" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.658012 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jbfln"] Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.664918 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-fzl9j" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.690775 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.692866 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh"] Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.701546 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:31 crc kubenswrapper[5039]: E1124 13:20:31.701867 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:32.201853376 +0000 UTC m=+144.640977876 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.802247 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:31 crc kubenswrapper[5039]: E1124 13:20:31.802406 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:32.30238361 +0000 UTC m=+144.741508110 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.802781 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:31 crc kubenswrapper[5039]: E1124 13:20:31.803060 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:32.303048588 +0000 UTC m=+144.742173088 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.846149 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2l986" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.858855 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.904340 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:31 crc kubenswrapper[5039]: E1124 13:20:31.904778 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:32.404757462 +0000 UTC m=+144.843881962 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.978387 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ndmnz"] Nov 24 13:20:31 crc kubenswrapper[5039]: I1124 13:20:31.985646 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-6gfhg"] Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.039574 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:32 crc kubenswrapper[5039]: E1124 13:20:32.039888 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:32.539876528 +0000 UTC m=+144.979001028 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:32 crc kubenswrapper[5039]: W1124 13:20:32.095868 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2d22e4bd_1186_43a6_bc4f_d313377c78f3.slice/crio-06d5aeb85f8041abbb95d82fbe6ee9b451b172bf4296d63d74b961d117e3d11e WatchSource:0}: Error finding container 06d5aeb85f8041abbb95d82fbe6ee9b451b172bf4296d63d74b961d117e3d11e: Status 404 returned error can't find the container with id 06d5aeb85f8041abbb95d82fbe6ee9b451b172bf4296d63d74b961d117e3d11e Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.140681 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:32 crc kubenswrapper[5039]: E1124 13:20:32.141579 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:32.641544461 +0000 UTC m=+145.080668961 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.179284 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-zqgfl"]
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.195787 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4qp6m"]
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.235050 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-98dp9" podStartSLOduration=119.235033993 podStartE2EDuration="1m59.235033993s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:32.233640567 +0000 UTC m=+144.672765057" watchObservedRunningTime="2025-11-24 13:20:32.235033993 +0000 UTC m=+144.674158493"
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.243163 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:32 crc kubenswrapper[5039]: E1124 13:20:32.243684 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:32.743659306 +0000 UTC m=+145.182783996 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.248956 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-zf42k" event={"ID":"1f41fa87-ea96-4457-92a1-8bb69acc8b0e","Type":"ContainerStarted","Data":"458f9c4a48f683f7e8894bbef777debe8bbf191cc2d9d764370f729596c4ba98"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.250723 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ndmnz" event={"ID":"2d22e4bd-1186-43a6-bc4f-d313377c78f3","Type":"ContainerStarted","Data":"06d5aeb85f8041abbb95d82fbe6ee9b451b172bf4296d63d74b961d117e3d11e"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.253806 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5dflw" event={"ID":"95afc2a3-7cfb-4a24-b555-5e8c0d21e044","Type":"ContainerStarted","Data":"5e2091ca9cc172e2905860a1c0d499eadacba34da12852d805f20b1b3eedfb08"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.258095 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-t958r" event={"ID":"5f7a5c45-edc8-443e-9730-f7a2eb5ab116","Type":"ContainerStarted","Data":"f2589831f90d9d476d2bbb3b21d932328fe04f1468b1cd506937b9174ac760f3"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.259313 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2" event={"ID":"32c545f6-2f66-4212-a7d0-01eab2f40da7","Type":"ContainerStarted","Data":"aacaa43b3a8bdaeba14de3d6fbebe075bf77186aef989b8163462e932d766d3c"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.261133 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" event={"ID":"0883a675-24bc-4d9b-b318-feee05e49135","Type":"ContainerStarted","Data":"d4f0f0f81919e0eff3f93e73c806d1fad65adaa1b3d548e5314e6ca872e954b2"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.263900 5039 generic.go:334] "Generic (PLEG): container finished" podID="bc7f5002-5906-428a-bb9e-c3507cc151c8" containerID="b69968087c826420502a3f0733258ce70dcb42adc60a45588905425c632a403e" exitCode=0
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.263975 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" event={"ID":"bc7f5002-5906-428a-bb9e-c3507cc151c8","Type":"ContainerDied","Data":"b69968087c826420502a3f0733258ce70dcb42adc60a45588905425c632a403e"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.269734 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-4hh9n" event={"ID":"25637e2c-a1e3-4449-a549-7b081d0c4c4c","Type":"ContainerStarted","Data":"b0d145255052d33a67c7d43f3e2b6e04592ea6e13a408eb2a03246fce31586c8"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.277796 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jbfln" event={"ID":"20f8a1d6-8417-4cd3-9af7-b8e8f05f4c52","Type":"ContainerStarted","Data":"4d68f5f9657ca0b8170b289020cbcef6f46cf3e95059e15c8de8a86294df79bc"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.279284 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh" event={"ID":"5d53487c-6d56-443c-94d3-899cd8be9666","Type":"ContainerStarted","Data":"ae304a24c71db9f8a0e4851faeb800d09dc1d175655230c5be2ecc1fb390987a"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.281420 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" event={"ID":"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d","Type":"ContainerStarted","Data":"d4e2e5e8820d35f17991dcd3a409919933802d70890887c908229520f3e1169a"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.283731 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-56bpb" event={"ID":"e04521cd-f63c-40ba-a296-a34d7ce739d7","Type":"ContainerStarted","Data":"05209eb7079a4878b0e3334bfa1537dbf75039ad6db53537da4cda76956e8a89"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.285135 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-blxxm" event={"ID":"f4787aef-1d33-49b7-b5c3-7b0404bab7f5","Type":"ContainerStarted","Data":"9f2044284f569db578e2f87a2ccbcde52ea82d07c49a5070f5f135235a30abd7"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.326051 5039 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-lv5c7 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body=
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.326099 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7" podUID="a6decbe9-edda-413b-b067-665ccf6efece" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused"
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.329481 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gj47k" event={"ID":"021285de-f14a-481e-986e-8d07616865a2","Type":"ContainerStarted","Data":"b53c9c84b519e567a8761c1a7a657debd5a7bc787ed469e9583e26686d80b5c8"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.329552 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7"
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.329567 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7" event={"ID":"a6decbe9-edda-413b-b067-665ccf6efece","Type":"ContainerStarted","Data":"5b0e38068a8f2cc4709f6aaa0e8433c63c1c0fabbb4ea4702466f9e1c5c81a57"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.329577 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z" event={"ID":"6e075f32-2803-4a2e-bf1a-0b1858adabf0","Type":"ContainerStarted","Data":"685dd3a1f43d33b76ae0696f7a2de630f70b6da30227dd97846d9d49922e3ede"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.331297 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-k4227" event={"ID":"792734d4-55a5-4a6f-be71-52a83b22c73f","Type":"ContainerStarted","Data":"0ac84228ef45baa532a1ed03111293241a4c606804ee12e1373d5417788154d3"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.343695 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:32 crc kubenswrapper[5039]: E1124 13:20:32.343913 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:32.843883482 +0000 UTC m=+145.283007982 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.345966 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:32 crc kubenswrapper[5039]: E1124 13:20:32.347041 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:32.847013482 +0000 UTC m=+145.286137982 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.386295 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-n2dwh" event={"ID":"ae5ca663-7edb-49dd-a7a7-668eeace13f7","Type":"ContainerStarted","Data":"2e91fbb5d2691d0303d0429acdac21140d57885a27fa18bfb0c11ee7b3ec099c"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.387658 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-n2dwh" event={"ID":"ae5ca663-7edb-49dd-a7a7-668eeace13f7","Type":"ContainerStarted","Data":"259907fc3dd32cbb20e5e407db55f966ff9dcb51698986b049b550149c4bc56e"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.398041 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mx8zv" event={"ID":"f1d33349-6b41-43a0-9aa9-03084435fd75","Type":"ContainerStarted","Data":"1ad0f45d9c4622439e9d2947fe9332f91bdb3288be174b755e7f045a768b106d"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.401277 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" event={"ID":"b073719c-394b-496f-9d64-75681184acb0","Type":"ContainerStarted","Data":"47402c0c79009b511125125e5a954a1ce7950d3777b9e7122d9b0ca1c92b9535"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.401313 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" event={"ID":"b073719c-394b-496f-9d64-75681184acb0","Type":"ContainerStarted","Data":"0366868a7747af71dafe199edf7949c6e099b4d86f888f34fbc62d142c8a8c06"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.402244 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f"
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.405445 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-6gfhg" event={"ID":"b74506b2-3cd6-4803-9a11-69fa714e570e","Type":"ContainerStarted","Data":"d7656df4d0e0534ae8bee15202bcead6e0ca3be2d33698e155db9920a070a100"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.408068 5039 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-b8b2f container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.23:6443/healthz\": dial tcp 10.217.0.23:6443: connect: connection refused" start-of-body=
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.408115 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" podUID="b073719c-394b-496f-9d64-75681184acb0" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.23:6443/healthz\": dial tcp 10.217.0.23:6443: connect: connection refused"
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.444959 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp"]
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.446638 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:32 crc kubenswrapper[5039]: E1124 13:20:32.447685 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:32.947667389 +0000 UTC m=+145.386791889 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.457609 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-c886j" event={"ID":"a9b8d64c-e7f3-4751-865c-c162aab7badd","Type":"ContainerStarted","Data":"f0f2bc5141b4f7e161d46a8e43c5500fa714f986a1ae960b8ab4a7f4d292bcb9"}
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.458444 5039 patch_prober.go:28] interesting pod/downloads-7954f5f757-98dp9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.458478 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-98dp9" podUID="c7032d1d-5aae-4e50-b10f-3df40a0cd983" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.459981 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-c886j"
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.461424 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-29j84"]
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.462824 5039 patch_prober.go:28] interesting pod/console-operator-58897d9998-c886j container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.15:8443/readyz\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body=
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.462869 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-c886j" podUID="a9b8d64c-e7f3-4751-865c-c162aab7badd" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.15:8443/readyz\": dial tcp 10.217.0.15:8443: connect: connection refused"
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.549599 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:32 crc kubenswrapper[5039]: E1124 13:20:32.553199 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:33.053180021 +0000 UTC m=+145.492304581 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.650190 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:32 crc kubenswrapper[5039]: E1124 13:20:32.650890 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:33.150872772 +0000 UTC m=+145.589997272 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.752648 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:32 crc kubenswrapper[5039]: E1124 13:20:32.753242 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:33.253222383 +0000 UTC m=+145.692346883 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.768480 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gkvs9"]
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.772563 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-6kn6v"]
Nov 24 13:20:32 crc kubenswrapper[5039]: W1124 13:20:32.790811 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod988022f9_58e3_429b_9940_657283113440.slice/crio-5b0a9b980cfa0025b4af40c388491bb4e6fccfb9f008afae1ca84b65312ec907 WatchSource:0}: Error finding container 5b0a9b980cfa0025b4af40c388491bb4e6fccfb9f008afae1ca84b65312ec907: Status 404 returned error can't find the container with id 5b0a9b980cfa0025b4af40c388491bb4e6fccfb9f008afae1ca84b65312ec907
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.856668 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:32 crc kubenswrapper[5039]: E1124 13:20:32.857305 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:33.357273877 +0000 UTC m=+145.796398377 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.903389 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-4hh9n"
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.905373 5039 patch_prober.go:28] interesting pod/router-default-5444994796-4hh9n container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body=
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.905480 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4hh9n" podUID="25637e2c-a1e3-4449-a549-7b081d0c4c4c" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused"
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.910776 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-djx5v" podStartSLOduration=119.910760478 podStartE2EDuration="1m59.910760478s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:32.909870285 +0000 UTC m=+145.348994785" watchObservedRunningTime="2025-11-24 13:20:32.910760478 +0000 UTC m=+145.349884978"
Nov 24 13:20:32 crc kubenswrapper[5039]: W1124 13:20:32.945293 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4f19e739_3cb7_4007_8fce_7bfb0f76e0a4.slice/crio-0a1c11abc176a833e6d9acbadb11f7db6b190e1c9ea797658256d3f0733b01fb WatchSource:0}: Error finding container 0a1c11abc176a833e6d9acbadb11f7db6b190e1c9ea797658256d3f0733b01fb: Status 404 returned error can't find the container with id 0a1c11abc176a833e6d9acbadb11f7db6b190e1c9ea797658256d3f0733b01fb
Nov 24 13:20:32 crc kubenswrapper[5039]: W1124 13:20:32.948063 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeded421c_1f8d_4719_ac97_30116f0eda31.slice/crio-5e89c2a5676259c5d7df5aa1df9fd5400ac455035119f5a1ba2c6a482cec105f WatchSource:0}: Error finding container 5e89c2a5676259c5d7df5aa1df9fd5400ac455035119f5a1ba2c6a482cec105f: Status 404 returned error can't find the container with id 5e89c2a5676259c5d7df5aa1df9fd5400ac455035119f5a1ba2c6a482cec105f
Nov 24 13:20:32 crc kubenswrapper[5039]: I1124 13:20:32.958575 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:32 crc kubenswrapper[5039]: E1124 13:20:32.959175 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:33.459159786 +0000 UTC m=+145.898284286 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.059376 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:33 crc kubenswrapper[5039]: E1124 13:20:33.059646 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:33.559582827 +0000 UTC m=+145.998707327 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.059712 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:33 crc kubenswrapper[5039]: E1124 13:20:33.059979 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:33.559967457 +0000 UTC m=+145.999091957 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.126462 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-4hh9n" podStartSLOduration=120.126444533 podStartE2EDuration="2m0.126444533s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:33.124123613 +0000 UTC m=+145.563248113" watchObservedRunningTime="2025-11-24 13:20:33.126444533 +0000 UTC m=+145.565569033"
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.162776 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:33 crc kubenswrapper[5039]: E1124 13:20:33.163192 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:33.663147299 +0000 UTC m=+146.102271799 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.163346 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:33 crc kubenswrapper[5039]: E1124 13:20:33.163687 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:33.663670993 +0000 UTC m=+146.102795493 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.169653 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db"]
Nov 24 13:20:33 crc kubenswrapper[5039]: W1124 13:20:33.177294 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod17a9ee7f_16c3_40ce_b614_a30cee1e8d83.slice/crio-9a1740a6b43242eeaccc4142e2ef10229ff5448c8f07eedc56e6e1f123b14a07 WatchSource:0}: Error finding container 9a1740a6b43242eeaccc4142e2ef10229ff5448c8f07eedc56e6e1f123b14a07: Status 404 returned error can't find the container with id 9a1740a6b43242eeaccc4142e2ef10229ff5448c8f07eedc56e6e1f123b14a07
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.181885 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" podStartSLOduration=120.181858522 podStartE2EDuration="2m0.181858522s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:33.157165015 +0000 UTC m=+145.596289515" watchObservedRunningTime="2025-11-24 13:20:33.181858522 +0000 UTC m=+145.620983022"
Nov 24 13:20:33 crc kubenswrapper[5039]: W1124 13:20:33.183438 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6170f687_30e2_44b0_860e_ddcee4e4f2d4.slice/crio-7a5d6fbd3434ce2d121edcce9e34e34e10173fcbb1ec851212ffd2a050f02810 WatchSource:0}: Error finding container 7a5d6fbd3434ce2d121edcce9e34e34e10173fcbb1ec851212ffd2a050f02810: Status 404 returned error can't find the container with id 7a5d6fbd3434ce2d121edcce9e34e34e10173fcbb1ec851212ffd2a050f02810
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.189705 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54"]
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.200935 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7"]
Nov 24 13:20:33 crc kubenswrapper[5039]: W1124 13:20:33.204425 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c2dcfde_39a1_42d0_ba00_d18199782ba5.slice/crio-5862020c3c363002d2b7090d5258c7725b6359d58a8d4990753666ebb790582d WatchSource:0}: Error finding container 5862020c3c363002d2b7090d5258c7725b6359d58a8d4990753666ebb790582d: Status 404 returned error can't find the container with id 5862020c3c363002d2b7090d5258c7725b6359d58a8d4990753666ebb790582d
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.210143 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-md284"]
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.219759 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-fzl9j"]
Nov 24 13:20:33 crc kubenswrapper[5039]: W1124 13:20:33.225556 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda63b9811_8d87_42cf_9773_3145399ce2b6.slice/crio-74fdde633707c9514295a46b9e824423edd0dfb9b0fc56b9bc9de9fc88393298 WatchSource:0}: Error finding container 74fdde633707c9514295a46b9e824423edd0dfb9b0fc56b9bc9de9fc88393298: Status 404 returned error can't find the container with id 74fdde633707c9514295a46b9e824423edd0dfb9b0fc56b9bc9de9fc88393298
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.229391 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gj47k" podStartSLOduration=120.229377138 podStartE2EDuration="2m0.229377138s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:33.226969447 +0000 UTC m=+145.666093957" watchObservedRunningTime="2025-11-24 13:20:33.229377138 +0000 UTC m=+145.668501638"
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.264494 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:33 crc kubenswrapper[5039]: E1124 13:20:33.264938 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:33.764921626 +0000 UTC m=+146.204046126 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.312572 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5dflw" podStartSLOduration=120.312554105 podStartE2EDuration="2m0.312554105s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:33.310801289 +0000 UTC m=+145.749925789" watchObservedRunningTime="2025-11-24 13:20:33.312554105 +0000 UTC m=+145.751678605"
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.312858 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-n2dwh" podStartSLOduration=120.312852862 podStartE2EDuration="2m0.312852862s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:33.271242468 +0000 UTC m=+145.710366968" watchObservedRunningTime="2025-11-24 13:20:33.312852862 +0000 UTC m=+145.751977352"
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.319292 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-2l986"]
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.367189 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:33 crc kubenswrapper[5039]: E1124 13:20:33.367746 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:33.867732758 +0000 UTC m=+146.306857258 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.403972 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8t5gh"]
Nov 24 13:20:33 crc kubenswrapper[5039]: W1124 13:20:33.413165 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4637ec55_c9ee_48a4_9351_6a382efe4c91.slice/crio-a8e527a352d3f4ccc9546b1e0cd20b482c28dfc99d24f977ed91a88211ad9e22 WatchSource:0}: Error finding container a8e527a352d3f4ccc9546b1e0cd20b482c28dfc99d24f977ed91a88211ad9e22: Status 404 returned error can't find the container with id a8e527a352d3f4ccc9546b1e0cd20b482c28dfc99d24f977ed91a88211ad9e22
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.417695 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-k884h"]
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.426105 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-kln9b"]
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.428263 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-c886j" podStartSLOduration=120.428232649 podStartE2EDuration="2m0.428232649s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:33.401700194 +0000 UTC m=+145.840824694" watchObservedRunningTime="2025-11-24 13:20:33.428232649 +0000 UTC m=+145.867357149"
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.431347 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-pcqbh"]
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.437978 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7" podStartSLOduration=119.437946449 podStartE2EDuration="1m59.437946449s" podCreationTimestamp="2025-11-24 13:18:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:33.422722476 +0000 UTC m=+145.861846976" watchObservedRunningTime="2025-11-24 13:20:33.437946449 +0000 UTC m=+145.877070939"
Nov 24 13:20:33 crc kubenswrapper[5039]: W1124 13:20:33.445669 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podefd3afce_d8bd_46c0_8bc6_ebace6984f16.slice/crio-969fda1f5305db4d6fa0629e22959d056b04cc67ed891069faf8b5010dd217b8 WatchSource:0}: Error finding container 969fda1f5305db4d6fa0629e22959d056b04cc67ed891069faf8b5010dd217b8: Status 404 returned error can't find the container with id 969fda1f5305db4d6fa0629e22959d056b04cc67ed891069faf8b5010dd217b8
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.467587 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gkvs9" event={"ID":"157eb7c6-f1df-4336-8796-d100eda6102d","Type":"ContainerStarted","Data":"2de5bebd5261a366ad7c66e214fc39837c737d0ab50f84f214a869771d0449c9"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.468112 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:33 crc kubenswrapper[5039]: E1124 13:20:33.468252 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:33.968232561 +0000 UTC m=+146.407357061 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.468387 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:33 crc kubenswrapper[5039]: E1124 13:20:33.468754 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:33.968740924 +0000 UTC m=+146.407865424 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.470143 5039 generic.go:334] "Generic (PLEG): container finished" podID="cf6c1c3b-3f1c-449f-966b-d8617a4ca73d" containerID="c8a9dce66061e8d8ce158d28c8f5eb68b9f103f340a8b55ceea8b8923d59567a" exitCode=0
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.470262 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" event={"ID":"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d","Type":"ContainerDied","Data":"c8a9dce66061e8d8ce158d28c8f5eb68b9f103f340a8b55ceea8b8923d59567a"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.473477 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2l986" event={"ID":"55fb48e8-0db2-4fe4-b3f8-1ede33b3a49d","Type":"ContainerStarted","Data":"d27befe02d815602a7876f46da60538bba166106d002209828025dbafe833961"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.474427 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7" event={"ID":"2c2dcfde-39a1-42d0-ba00-d18199782ba5","Type":"ContainerStarted","Data":"5862020c3c363002d2b7090d5258c7725b6359d58a8d4990753666ebb790582d"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.475332 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-zqgfl" event={"ID":"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2","Type":"ContainerStarted","Data":"1c41a032064e74af8db8cc0bb155a375301bfa9999813a444502e63d675c3992"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.476178 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" event={"ID":"efd3afce-d8bd-46c0-8bc6-ebace6984f16","Type":"ContainerStarted","Data":"969fda1f5305db4d6fa0629e22959d056b04cc67ed891069faf8b5010dd217b8"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.477745 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-zf42k" event={"ID":"1f41fa87-ea96-4457-92a1-8bb69acc8b0e","Type":"ContainerStarted","Data":"59053a5119bc8944a25a0f921d6fd14b8e6605fd616ef7c79924a5b66ab21f1d"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.478938 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" event={"ID":"0883a675-24bc-4d9b-b318-feee05e49135","Type":"ContainerStarted","Data":"b4b232d3808308ce64c298612516c0fbb2bad72e3398b5416091ea205cc3d8e2"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.479935 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54" event={"ID":"6170f687-30e2-44b0-860e-ddcee4e4f2d4","Type":"ContainerStarted","Data":"7a5d6fbd3434ce2d121edcce9e34e34e10173fcbb1ec851212ffd2a050f02810"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.481068 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-md284" event={"ID":"1135d11d-6ba1-46f6-b01d-0cb7529831e7","Type":"ContainerStarted","Data":"85042384948cd08e3d4957074ed434ffdb4effbd06874210301312e2cc358552"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.482481 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" event={"ID":"988022f9-58e3-429b-9940-657283113440","Type":"ContainerStarted","Data":"5b0a9b980cfa0025b4af40c388491bb4e6fccfb9f008afae1ca84b65312ec907"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.483469 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-k884h" event={"ID":"4637ec55-c9ee-48a4-9351-6a382efe4c91","Type":"ContainerStarted","Data":"a8e527a352d3f4ccc9546b1e0cd20b482c28dfc99d24f977ed91a88211ad9e22"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.484224 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6kn6v" event={"ID":"4f19e739-3cb7-4007-8fce-7bfb0f76e0a4","Type":"ContainerStarted","Data":"0a1c11abc176a833e6d9acbadb11f7db6b190e1c9ea797658256d3f0733b01fb"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.485795 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4qp6m" event={"ID":"1988c73c-a04d-4b50-af92-54dfc2a4a262","Type":"ContainerStarted","Data":"81d50348d0ca22043ca5f60293d733d914cedcf9e72e28356714e3993fa24054"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.491140 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-29j84" event={"ID":"eded421c-1f8d-4719-ac97-30116f0eda31","Type":"ContainerStarted","Data":"5e89c2a5676259c5d7df5aa1df9fd5400ac455035119f5a1ba2c6a482cec105f"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.492912 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8t5gh" event={"ID":"23c83b86-90ee-4d41-9362-feaba87dfc0c","Type":"ContainerStarted","Data":"59e8192cbfabf418dca07506954c4927c1f1a4ad451b3e41d19ad22466957469"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.493774 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db" event={"ID":"17a9ee7f-16c3-40ce-b614-a30cee1e8d83","Type":"ContainerStarted","Data":"9a1740a6b43242eeaccc4142e2ef10229ff5448c8f07eedc56e6e1f123b14a07"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.496985 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" event={"ID":"21cbf909-e064-465c-bb64-5b0d5c82d691","Type":"ContainerStarted","Data":"0d0798226555cd746d27b4c4a5c47e4f6dadb9dbe30d5f37c97d56f984916b29"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.499798 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mx8zv" event={"ID":"f1d33349-6b41-43a0-9aa9-03084435fd75","Type":"ContainerStarted","Data":"87fff122c7147473915101c83a6522d678dd0a9528c69295d169c52464397255"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.501333 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-t958r" event={"ID":"5f7a5c45-edc8-443e-9730-f7a2eb5ab116","Type":"ContainerStarted","Data":"99913dd948e7e164530ad7d624f27631d5d19ad903be6d3aef9ad553233863e7"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.502805 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-fzl9j" event={"ID":"a63b9811-8d87-42cf-9773-3145399ce2b6","Type":"ContainerStarted","Data":"74fdde633707c9514295a46b9e824423edd0dfb9b0fc56b9bc9de9fc88393298"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.504496 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-krwk4" event={"ID":"75c9640f-f412-4f63-ac86-f4d665d57038","Type":"ContainerStarted","Data":"2320bf18bd94ebbcc56fd05186d18f8e8850724d82910ac93889e7f6ca6e8ab4"}
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.505573 5039 patch_prober.go:28] interesting pod/downloads-7954f5f757-98dp9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.505651 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-98dp9" podUID="c7032d1d-5aae-4e50-b10f-3df40a0cd983" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.506228 5039 patch_prober.go:28] interesting pod/console-operator-58897d9998-c886j container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.15:8443/readyz\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body=
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.506260 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-c886j" podUID="a9b8d64c-e7f3-4751-865c-c162aab7badd" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.15:8443/readyz\": dial tcp 10.217.0.15:8443: connect: connection refused"
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.506361 5039 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-lv5c7 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body=
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.506400 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7" podUID="a6decbe9-edda-413b-b067-665ccf6efece" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused"
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.505607 5039 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-b8b2f container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.23:6443/healthz\": dial tcp 10.217.0.23:6443: connect: connection refused" start-of-body=
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.506442 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" podUID="b073719c-394b-496f-9d64-75681184acb0" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.23:6443/healthz\": dial tcp 10.217.0.23:6443: connect: connection refused"
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.569044 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:33 crc kubenswrapper[5039]: E1124 13:20:33.570078 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:34.070055319 +0000 UTC m=+146.509179819 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.671408 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:33 crc kubenswrapper[5039]: E1124 13:20:33.672200 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:34.172188184 +0000 UTC m=+146.611312684 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.772823 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:33 crc kubenswrapper[5039]: E1124 13:20:33.773040 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:34.273017095 +0000 UTC m=+146.712141595 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.773380 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:33 crc kubenswrapper[5039]: E1124 13:20:33.774097 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:34.274071222 +0000 UTC m=+146.713195722 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.874717 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:33 crc kubenswrapper[5039]: E1124 13:20:33.874938 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:34.374914054 +0000 UTC m=+146.814038554 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.875200 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:33 crc kubenswrapper[5039]: E1124 13:20:33.875551 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:34.37554347 +0000 UTC m=+146.814667960 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.905622 5039 patch_prober.go:28] interesting pod/router-default-5444994796-4hh9n container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body=
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.905707 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4hh9n" podUID="25637e2c-a1e3-4449-a549-7b081d0c4c4c" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused"
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.977368 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:33 crc kubenswrapper[5039]: E1124 13:20:33.977774 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:34.477732237 +0000 UTC m=+146.916856737 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:33 crc kubenswrapper[5039]: I1124 13:20:33.978022 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:33 crc kubenswrapper[5039]: E1124 13:20:33.981825 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:34.481802602 +0000 UTC m=+146.920927112 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.079708 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:34 crc kubenswrapper[5039]: E1124 13:20:34.079921 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:34.579887613 +0000 UTC m=+147.019012113 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.080537 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:34 crc kubenswrapper[5039]: E1124 13:20:34.080960 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:34.580949489 +0000 UTC m=+147.020074079 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.182332 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:34 crc kubenswrapper[5039]: E1124 13:20:34.182893 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:34.682868659 +0000 UTC m=+147.121993189 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.283752 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:34 crc kubenswrapper[5039]: E1124 13:20:34.284168 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:34.784152433 +0000 UTC m=+147.223276943 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.386525 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:34 crc kubenswrapper[5039]: E1124 13:20:34.387609 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:34.887587171 +0000 UTC m=+147.326711671 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.494129 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:34 crc kubenswrapper[5039]: E1124 13:20:34.494636 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:34.994619123 +0000 UTC m=+147.433743623 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.538940 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2" event={"ID":"32c545f6-2f66-4212-a7d0-01eab2f40da7","Type":"ContainerStarted","Data":"b0607e29c943efc8d274e33f61b43a306ffa086f6a1c16acdfda64b38f10e2de"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.550988 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-56bpb" event={"ID":"e04521cd-f63c-40ba-a296-a34d7ce739d7","Type":"ContainerStarted","Data":"7c82d856a413bffe588056d971db08e7adac1ee0857a70462f55017b3e9868b1"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.561984 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-md284" event={"ID":"1135d11d-6ba1-46f6-b01d-0cb7529831e7","Type":"ContainerStarted","Data":"a0916f8367550ad7162a04337c119b1c88a6db942fe9da49d4ae2f050911e73c"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.565441 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-t958r" podStartSLOduration=121.565418739 podStartE2EDuration="2m1.565418739s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:33.521469055 +0000 UTC m=+145.960593565" watchObservedRunningTime="2025-11-24 13:20:34.565418739 +0000 UTC m=+147.004543239" Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.598269 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.598394 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" event={"ID":"bc7f5002-5906-428a-bb9e-c3507cc151c8","Type":"ContainerStarted","Data":"6eeceb57e2a5c04ca1eb17dd70e9fdf41f28448d86057039f97ba79aa65478ed"} Nov 24 13:20:34 crc kubenswrapper[5039]: E1124 13:20:34.598949 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:35.098925204 +0000 UTC m=+147.538049714 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.599201 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.606789 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-56bpb" podStartSLOduration=121.606771907 podStartE2EDuration="2m1.606771907s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:34.584335878 +0000 UTC m=+147.023460378" watchObservedRunningTime="2025-11-24 13:20:34.606771907 +0000 UTC m=+147.045896407" Nov 24 13:20:34 crc kubenswrapper[5039]: E1124 13:20:34.609933 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:35.109911557 +0000 UTC m=+147.549036117 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.654808 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gkvs9" event={"ID":"157eb7c6-f1df-4336-8796-d100eda6102d","Type":"ContainerStarted","Data":"da6934f22dc288876db70485e809c2fac0313c1a653002ae3f9fafbad1232328"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.660530 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-29j84" event={"ID":"eded421c-1f8d-4719-ac97-30116f0eda31","Type":"ContainerStarted","Data":"c345a08a6e3692841adbe8afae24db97af92a7b7d07e8d6a5ae35c3fc06392a6"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.677667 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54" event={"ID":"6170f687-30e2-44b0-860e-ddcee4e4f2d4","Type":"ContainerStarted","Data":"8fea76e0ae8d5965ad8f64d9623d6e39bda662baa57206f38c5b9d0e603594c1"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.679940 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-md284" podStartSLOduration=120.679916603 podStartE2EDuration="2m0.679916603s" podCreationTimestamp="2025-11-24 13:18:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:34.608938593 +0000 UTC m=+147.048063093" watchObservedRunningTime="2025-11-24 13:20:34.679916603 +0000 UTC m=+147.119041103" Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.680591 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gkvs9" podStartSLOduration=121.680585271 podStartE2EDuration="2m1.680585271s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:34.679892363 +0000 UTC m=+147.119016873" watchObservedRunningTime="2025-11-24 13:20:34.680585271 +0000 UTC m=+147.119709771" Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.709330 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-krwk4" event={"ID":"75c9640f-f412-4f63-ac86-f4d665d57038","Type":"ContainerStarted","Data":"8b1396aefd082ceab028640b724596c0b35e45018a2053d8a6e9eee53bf7a48d"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.711398 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:34 crc kubenswrapper[5039]: E1124 13:20:34.712265 5039 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:35.212221237 +0000 UTC m=+147.651345907 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.725123 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54" podStartSLOduration=121.7251016 podStartE2EDuration="2m1.7251016s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:34.710886712 +0000 UTC m=+147.150011212" watchObservedRunningTime="2025-11-24 13:20:34.7251016 +0000 UTC m=+147.164226100" Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.726375 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z" event={"ID":"6e075f32-2803-4a2e-bf1a-0b1858adabf0","Type":"ContainerStarted","Data":"1d6779cc5daa2723ad427066a9e9c10c8df9cb41224abc08d8c4daa96b8a8a38"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.728938 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-k4227" event={"ID":"792734d4-55a5-4a6f-be71-52a83b22c73f","Type":"ContainerStarted","Data":"a3654b0a9d66ef7fc4971421c2b13a8e3acfc65cd68cfe39651672df8e983b55"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.735908 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4qp6m" event={"ID":"1988c73c-a04d-4b50-af92-54dfc2a4a262","Type":"ContainerStarted","Data":"eba7576db9f2fe5c47fd5109f15bca32221b4854b3fbbc188d03406b36902448"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.738354 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-6gfhg" event={"ID":"b74506b2-3cd6-4803-9a11-69fa714e570e","Type":"ContainerStarted","Data":"0f78aa696cd21e4c877b1cf027c8be2e741f8687a2d834e1de3ef27f207e06c4"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.743174 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-krwk4" podStartSLOduration=6.743145375 podStartE2EDuration="6.743145375s" podCreationTimestamp="2025-11-24 13:20:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:34.739526242 +0000 UTC m=+147.178650742" watchObservedRunningTime="2025-11-24 13:20:34.743145375 +0000 UTC m=+147.182269875" Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.749749 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-zf42k" 
event={"ID":"1f41fa87-ea96-4457-92a1-8bb69acc8b0e","Type":"ContainerStarted","Data":"022abad8862b4fd916a4aa40b245b4fec03d41ad62076d104ec6b22f239626e5"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.752223 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-fzl9j" event={"ID":"a63b9811-8d87-42cf-9773-3145399ce2b6","Type":"ContainerStarted","Data":"e11bc8168eb71e5e816c5c6dc8d79f6dc4a813415720975dcb7a798d270b8c1c"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.764382 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9dq5z" podStartSLOduration=121.764356783 podStartE2EDuration="2m1.764356783s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:34.759670061 +0000 UTC m=+147.198794561" watchObservedRunningTime="2025-11-24 13:20:34.764356783 +0000 UTC m=+147.203481283" Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.775422 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6kn6v" event={"ID":"4f19e739-3cb7-4007-8fce-7bfb0f76e0a4","Type":"ContainerStarted","Data":"186f9598efab23e65594ee860ee1a59f10b060f47937fc0a133b5d9c80af8d42"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.792049 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" event={"ID":"988022f9-58e3-429b-9940-657283113440","Type":"ContainerStarted","Data":"046a9d05de9057914ad102bd81d6c346b74f76b4570396b9f32ce9c95655cdf5"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.793104 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.794670 5039 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-g5ldp container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.42:5443/healthz\": dial tcp 10.217.0.42:5443: connect: connection refused" start-of-body= Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.794718 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" podUID="988022f9-58e3-429b-9940-657283113440" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.42:5443/healthz\": dial tcp 10.217.0.42:5443: connect: connection refused" Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.802193 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-6gfhg" podStartSLOduration=120.802168268 podStartE2EDuration="2m0.802168268s" podCreationTimestamp="2025-11-24 13:18:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:34.791892413 +0000 UTC m=+147.231016913" watchObservedRunningTime="2025-11-24 13:20:34.802168268 +0000 UTC m=+147.241292768" Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.813275 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.813592 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4qp6m" podStartSLOduration=121.813582063 podStartE2EDuration="2m1.813582063s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:34.807443214 +0000 UTC m=+147.246567714" watchObservedRunningTime="2025-11-24 13:20:34.813582063 +0000 UTC m=+147.252706553" Nov 24 13:20:34 crc kubenswrapper[5039]: E1124 13:20:34.814472 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:35.314459335 +0000 UTC m=+147.753583835 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.821265 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-zqgfl" event={"ID":"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2","Type":"ContainerStarted","Data":"14e6cf73b8d10e0900286ffe41debff82c381e768307053485d0b5094ca2af67"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.823982 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-k884h" event={"ID":"4637ec55-c9ee-48a4-9351-6a382efe4c91","Type":"ContainerStarted","Data":"d2b067a7ef2146dd02670527143ed669da4760a9b86427a5bbc9b9dc03bbae94"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.824795 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-k884h" Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.840628 5039 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-k884h container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.30:8080/healthz\": dial tcp 10.217.0.30:8080: connect: connection refused" start-of-body= Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.840679 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-k884h" podUID="4637ec55-c9ee-48a4-9351-6a382efe4c91" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.30:8080/healthz\": dial tcp 10.217.0.30:8080: connect: connection refused" Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.844572 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2l986" 
event={"ID":"55fb48e8-0db2-4fe4-b3f8-1ede33b3a49d","Type":"ContainerStarted","Data":"bec6ca181e1926425b84816dac6668fc03daf2607ad9c91c9290918094217ff2"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.852618 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db" event={"ID":"17a9ee7f-16c3-40ce-b614-a30cee1e8d83","Type":"ContainerStarted","Data":"953d2c4f4144b03340cef97b23e0d8c01ad8a1558d32fed41364511a90951a39"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.854335 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db" Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.864980 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-blxxm" event={"ID":"f4787aef-1d33-49b7-b5c3-7b0404bab7f5","Type":"ContainerStarted","Data":"35dbbc4d6b7d427bbcc9a7fcf29aab871258241232390f943723fa3da9fde9e3"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.866002 5039 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-5q4db container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body= Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.866035 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db" podUID="17a9ee7f-16c3-40ce-b614-a30cee1e8d83" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.900988 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jbfln" event={"ID":"20f8a1d6-8417-4cd3-9af7-b8e8f05f4c52","Type":"ContainerStarted","Data":"d5c99f77370de1389225a81f9ada3e0be042a1fed78caf03c49025971ca09bb8"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.917324 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:34 crc kubenswrapper[5039]: E1124 13:20:34.918084 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:35.418062578 +0000 UTC m=+147.857187078 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.918585 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:34 crc kubenswrapper[5039]: E1124 13:20:34.920747 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:35.420733337 +0000 UTC m=+147.859857837 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.921456 5039 patch_prober.go:28] interesting pod/router-default-5444994796-4hh9n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 13:20:34 crc kubenswrapper[5039]: [-]has-synced failed: reason withheld Nov 24 13:20:34 crc kubenswrapper[5039]: [+]process-running ok Nov 24 13:20:34 crc kubenswrapper[5039]: healthz check failed Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.921521 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4hh9n" podUID="25637e2c-a1e3-4449-a549-7b081d0c4c4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.927479 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" podStartSLOduration=121.927261136 podStartE2EDuration="2m1.927261136s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:34.923689003 +0000 UTC m=+147.362813503" watchObservedRunningTime="2025-11-24 13:20:34.927261136 +0000 UTC m=+147.366385636" Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.932356 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-zf42k" podStartSLOduration=121.932337337 podStartE2EDuration="2m1.932337337s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:34.863990073 +0000 UTC m=+147.303114573" watchObservedRunningTime="2025-11-24 13:20:34.932337337 +0000 UTC m=+147.371461837" Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.942566 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7" event={"ID":"2c2dcfde-39a1-42d0-ba00-d18199782ba5","Type":"ContainerStarted","Data":"2ec4aa66cff0e7ac2c9b79dc716ee512857c736660bf0ea671670b9386697c1e"} Nov 24 13:20:34 crc kubenswrapper[5039]: I1124 13:20:34.979392 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-fzl9j" podStartSLOduration=6.97937511 podStartE2EDuration="6.97937511s" podCreationTimestamp="2025-11-24 13:20:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:34.97936635 +0000 UTC m=+147.418490850" watchObservedRunningTime="2025-11-24 13:20:34.97937511 +0000 UTC m=+147.418499610" Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.019595 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:35 crc kubenswrapper[5039]: E1124 13:20:35.020095 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:35.52007419 +0000 UTC m=+147.959198690 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.023638 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" event={"ID":"cf6c1c3b-3f1c-449f-966b-d8617a4ca73d","Type":"ContainerStarted","Data":"18f1697f47c17c29e8df3d1a8045d8117913b4f9949fad792b94705b414d50c6"} Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.026084 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8t5gh" event={"ID":"23c83b86-90ee-4d41-9362-feaba87dfc0c","Type":"ContainerStarted","Data":"e8fedfbad5f04adc5e4f7be3de4592758c399cb65a61c0d682237c3e353a19eb"} Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.044842 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ndmnz" event={"ID":"2d22e4bd-1186-43a6-bc4f-d313377c78f3","Type":"ContainerStarted","Data":"0dd575954429be616c1675a658bdc73b9fbe38954bbf8bb4622c47f295a47ce0"} Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.049027 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh" event={"ID":"5d53487c-6d56-443c-94d3-899cd8be9666","Type":"ContainerStarted","Data":"dc892ca88736b91da350815d038240f877df316963edb70c98a329115b11e6c8"} Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.049493 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh" Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.054481 5039 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-bmdnh container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused" start-of-body= Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.054579 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh" podUID="5d53487c-6d56-443c-94d3-899cd8be9666" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused" Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.054629 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jbfln" podStartSLOduration=122.054602201 podStartE2EDuration="2m2.054602201s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:35.052614929 +0000 UTC m=+147.491739439" watchObservedRunningTime="2025-11-24 13:20:35.054602201 +0000 UTC m=+147.493726701" Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.055280 5039 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openshift-console/console-f9d7485db-zqgfl" podStartSLOduration=122.055273358 podStartE2EDuration="2m2.055273358s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:35.031881935 +0000 UTC m=+147.471006435" watchObservedRunningTime="2025-11-24 13:20:35.055273358 +0000 UTC m=+147.494397858" Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.056641 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" event={"ID":"efd3afce-d8bd-46c0-8bc6-ebace6984f16","Type":"ContainerStarted","Data":"0c369a580945bf99afc914ac825da6f56f5e526079e25d39f886d16163facb0b"} Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.057010 5039 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-b8b2f container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.23:6443/healthz\": dial tcp 10.217.0.23:6443: connect: connection refused" start-of-body= Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.057041 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" podUID="b073719c-394b-496f-9d64-75681184acb0" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.23:6443/healthz\": dial tcp 10.217.0.23:6443: connect: connection refused" Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.068870 5039 patch_prober.go:28] interesting pod/console-operator-58897d9998-c886j container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.15:8443/readyz\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body= Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.068953 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-c886j" podUID="a9b8d64c-e7f3-4751-865c-c162aab7badd" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.15:8443/readyz\": dial tcp 10.217.0.15:8443: connect: connection refused" Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.089638 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-k884h" podStartSLOduration=122.089618015 podStartE2EDuration="2m2.089618015s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:35.087063579 +0000 UTC m=+147.526188079" watchObservedRunningTime="2025-11-24 13:20:35.089618015 +0000 UTC m=+147.528742515" Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.118275 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-blxxm" podStartSLOduration=122.118249313 podStartE2EDuration="2m2.118249313s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:35.115204515 +0000 UTC m=+147.554329025" watchObservedRunningTime="2025-11-24 13:20:35.118249313 +0000 UTC m=+147.557373813" Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 
13:20:35.123848 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:35 crc kubenswrapper[5039]: E1124 13:20:35.124568 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:35.624549666 +0000 UTC m=+148.063674176 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.138031 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db" podStartSLOduration=122.138004963 podStartE2EDuration="2m2.138004963s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:35.136548775 +0000 UTC m=+147.575673275" watchObservedRunningTime="2025-11-24 13:20:35.138004963 +0000 UTC m=+147.577129463" Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.174569 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" podStartSLOduration=122.174496414 podStartE2EDuration="2m2.174496414s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:35.173586891 +0000 UTC m=+147.612711411" watchObservedRunningTime="2025-11-24 13:20:35.174496414 +0000 UTC m=+147.613620904" Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.197778 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh" podStartSLOduration=122.197759825 podStartE2EDuration="2m2.197759825s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:35.197022376 +0000 UTC m=+147.636146886" watchObservedRunningTime="2025-11-24 13:20:35.197759825 +0000 UTC m=+147.636884325" Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.220959 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mx8zv" podStartSLOduration=122.220941323 podStartE2EDuration="2m2.220941323s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:35.22080881 +0000 UTC m=+147.659933330" 
watchObservedRunningTime="2025-11-24 13:20:35.220941323 +0000 UTC m=+147.660065823" Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.224945 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:35 crc kubenswrapper[5039]: E1124 13:20:35.227184 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:35.727164653 +0000 UTC m=+148.166289153 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.247600 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" podStartSLOduration=122.24756645 podStartE2EDuration="2m2.24756645s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:35.246235656 +0000 UTC m=+147.685360156" watchObservedRunningTime="2025-11-24 13:20:35.24756645 +0000 UTC m=+147.686690950" Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.266349 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-kln9b" podStartSLOduration=122.266305783 podStartE2EDuration="2m2.266305783s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:35.265858452 +0000 UTC m=+147.704982962" watchObservedRunningTime="2025-11-24 13:20:35.266305783 +0000 UTC m=+147.705430283" Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.327239 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:35 crc kubenswrapper[5039]: E1124 13:20:35.327624 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:35.827607985 +0000 UTC m=+148.266732485 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.428818 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:35 crc kubenswrapper[5039]: E1124 13:20:35.429152 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:35.929136674 +0000 UTC m=+148.368261174 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.529961 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:35 crc kubenswrapper[5039]: E1124 13:20:35.530344 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:36.030330996 +0000 UTC m=+148.469455496 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.625920 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p"
Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.626381 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p"
Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.627673 5039 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-6l58p container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.8:8443/livez\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body=
Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.627709 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" podUID="cf6c1c3b-3f1c-449f-966b-d8617a4ca73d" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.8:8443/livez\": dial tcp 10.217.0.8:8443: connect: connection refused"
Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.631245 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:35 crc kubenswrapper[5039]: E1124 13:20:35.631533 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:36.131517366 +0000 UTC m=+148.570641866 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.733059 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:35 crc kubenswrapper[5039]: E1124 13:20:35.733692 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:36.233657982 +0000 UTC m=+148.672782662 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.834392 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:35 crc kubenswrapper[5039]: E1124 13:20:35.834740 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:36.334691188 +0000 UTC m=+148.773815698 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.835099 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:35 crc kubenswrapper[5039]: E1124 13:20:35.835529 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:36.335487339 +0000 UTC m=+148.774611849 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.911807 5039 patch_prober.go:28] interesting pod/router-default-5444994796-4hh9n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 13:20:35 crc kubenswrapper[5039]: [-]has-synced failed: reason withheld
Nov 24 13:20:35 crc kubenswrapper[5039]: [+]process-running ok
Nov 24 13:20:35 crc kubenswrapper[5039]: healthz check failed
Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.911890 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4hh9n" podUID="25637e2c-a1e3-4449-a549-7b081d0c4c4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.936479 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:35 crc kubenswrapper[5039]: E1124 13:20:35.936633 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:36.436611468 +0000 UTC m=+148.875735968 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:35 crc kubenswrapper[5039]: I1124 13:20:35.936721 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:35 crc kubenswrapper[5039]: E1124 13:20:35.937043 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:36.437032539 +0000 UTC m=+148.876157039 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.037744 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:36 crc kubenswrapper[5039]: E1124 13:20:36.037925 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:36.537899781 +0000 UTC m=+148.977024281 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.038023 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:36 crc kubenswrapper[5039]: E1124 13:20:36.038327 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:36.538318062 +0000 UTC m=+148.977442562 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
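The failure repeating above is a registration problem, not a storage problem: kubelet resolves a CSI volume's driver by name against the drivers that have announced themselves through the kubelet plugin-registration mechanism, and kubevirt.io.hostpath-provisioner has not registered yet, so every MountDevice and TearDownAt attempt is rejected and requeued. A minimal stdlib-only Go sketch of the discovery side (the registry path below is the conventional kubelet default and is an assumption; this is not kubelet's plugin manager):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// Kubelet discovers node plugins by watching *.sock files in its plugin
// registration directory. Assumption: this conventional default path may
// differ on non-default installs.
const pluginRegistry = "/var/lib/kubelet/plugins_registry"

func main() {
	entries, err := os.ReadDir(pluginRegistry)
	if err != nil {
		fmt.Fprintln(os.Stderr, "cannot read plugin registry:", err)
		os.Exit(1)
	}
	found := false
	for _, e := range entries {
		if strings.HasSuffix(e.Name(), ".sock") {
			fmt.Println("registration socket:", filepath.Join(pluginRegistry, e.Name()))
			found = true
		}
	}
	if !found {
		// With no registration socket present, a driver such as
		// kubevirt.io.hostpath-provisioner stays "not found in the list
		// of registered CSI drivers" and volume ops keep failing.
		fmt.Println("no CSI registration sockets found")
	}
}

Once the driver's node plugin starts and creates its socket here, the errors above stop on the next retry without any other intervention.
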
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.065361 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-29j84" event={"ID":"eded421c-1f8d-4719-ac97-30116f0eda31","Type":"ContainerStarted","Data":"94fdb138b617ad3d7d19ea47f136c1e719666f1d622b3beaf3845fc0371418ec"}
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.065551 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-29j84"
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.068018 5039 generic.go:334] "Generic (PLEG): container finished" podID="32c545f6-2f66-4212-a7d0-01eab2f40da7" containerID="b0607e29c943efc8d274e33f61b43a306ffa086f6a1c16acdfda64b38f10e2de" exitCode=0
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.068113 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2" event={"ID":"32c545f6-2f66-4212-a7d0-01eab2f40da7","Type":"ContainerDied","Data":"b0607e29c943efc8d274e33f61b43a306ffa086f6a1c16acdfda64b38f10e2de"}
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.073904 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2l986" event={"ID":"55fb48e8-0db2-4fe4-b3f8-1ede33b3a49d","Type":"ContainerStarted","Data":"0b0314362f99bd6fb53adb68b6221827828a9637e02e8842105c16da7366af95"}
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.075993 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7" event={"ID":"2c2dcfde-39a1-42d0-ba00-d18199782ba5","Type":"ContainerStarted","Data":"1a26c5d90708cab259c6387670e15932b21158287be7f806908297f0782cdf0f"}
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.078377 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" event={"ID":"bc7f5002-5906-428a-bb9e-c3507cc151c8","Type":"ContainerStarted","Data":"7c7143d792ffb01b82630adecade7a72e46d2568e98c776ea46e7a784359ef14"}
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.080123 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ndmnz" event={"ID":"2d22e4bd-1186-43a6-bc4f-d313377c78f3","Type":"ContainerStarted","Data":"26d0662f788f6a27e8be457a9f4378e1c7a5810d2bf081ae6535929088ce6023"}
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.080515 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ndmnz"
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.081794 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-k4227" event={"ID":"792734d4-55a5-4a6f-be71-52a83b22c73f","Type":"ContainerStarted","Data":"29a83138deac51a7b39863cd534248622dc9d8fb23dafcacdd2a06f21b175cac"}
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.084085 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6kn6v" event={"ID":"4f19e739-3cb7-4007-8fce-7bfb0f76e0a4","Type":"ContainerStarted","Data":"006bfae22f5266893d2695a0d6e3748044795694467710ebe66100d2f63cea15"}
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.085981 5039 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-5q4db container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body=
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.086066 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db" podUID="17a9ee7f-16c3-40ce-b614-a30cee1e8d83" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused"
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.087353 5039 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-bmdnh container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused" start-of-body=
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.087387 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh" podUID="5d53487c-6d56-443c-94d3-899cd8be9666" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused"
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.089353 5039 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-g5ldp container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.42:5443/healthz\": dial tcp 10.217.0.42:5443: connect: connection refused" start-of-body=
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.089415 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" podUID="988022f9-58e3-429b-9940-657283113440" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.42:5443/healthz\": dial tcp 10.217.0.42:5443: connect: connection refused"
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.089357 5039 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-k884h container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.30:8080/healthz\": dial tcp 10.217.0.30:8080: connect: connection refused" start-of-body=
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.089469 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-k884h" podUID="4637ec55-c9ee-48a4-9351-6a382efe4c91" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.30:8080/healthz\": dial tcp 10.217.0.30:8080: connect: connection refused"
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.094233 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-8t5gh" podStartSLOduration=123.094207574 podStartE2EDuration="2m3.094207574s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:35.295366323 +0000 UTC m=+147.734490823" watchObservedRunningTime="2025-11-24 13:20:36.094207574 +0000 UTC m=+148.533332074"
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.111116 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-29j84" podStartSLOduration=8.111091719 podStartE2EDuration="8.111091719s" podCreationTimestamp="2025-11-24 13:20:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:36.096930624 +0000 UTC m=+148.536055124" watchObservedRunningTime="2025-11-24 13:20:36.111091719 +0000 UTC m=+148.550216219"
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.140115 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:36 crc kubenswrapper[5039]: E1124 13:20:36.140276 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:36.640245622 +0000 UTC m=+149.079370132 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.140440 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:36 crc kubenswrapper[5039]: E1124 13:20:36.140915 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:36.640902699 +0000 UTC m=+149.080027249 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
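The patch_prober/prober pairs above are kubelet HTTP probes hitting containers that have not opened their listening sockets yet, which is why each failure is a dial-level "connection refused" rather than an HTTP error. A rough sketch of that probe shape (illustrative Go, not kubelet's prober; the endpoint is one taken from this log):

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// Minimal sketch of an HTTP readiness probe in the style of kubelet's
// prober: GET the endpoint and treat status >= 400 as failure. Kubelet's
// HTTPS probes do not verify the server certificate, hence the
// InsecureSkipVerify below. Not the real kubelet code.
func probe(url string) error {
	client := &http.Client{
		Timeout:   1 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	resp, err := client.Get(url)
	if err != nil {
		// e.g. "connect: connection refused" while the server is down
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := probe("https://10.217.0.42:5443/healthz"); err != nil {
		fmt.Println("Probe failed:", err)
	}
}
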
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.147153 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-x9lf7" podStartSLOduration=123.14713355 podStartE2EDuration="2m3.14713355s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:36.141129695 +0000 UTC m=+148.580254225" watchObservedRunningTime="2025-11-24 13:20:36.14713355 +0000 UTC m=+148.586258060"
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.148170 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6kn6v" podStartSLOduration=123.148159156 podStartE2EDuration="2m3.148159156s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:36.114250192 +0000 UTC m=+148.553374692" watchObservedRunningTime="2025-11-24 13:20:36.148159156 +0000 UTC m=+148.587283656"
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.242130 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:36 crc kubenswrapper[5039]: E1124 13:20:36.242481 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:36.742459219 +0000 UTC m=+149.181583719 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.243586 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:36 crc kubenswrapper[5039]: E1124 13:20:36.250135 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:36.750113667 +0000 UTC m=+149.189238167 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.255725 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" podStartSLOduration=123.255707172 podStartE2EDuration="2m3.255707172s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:36.237044579 +0000 UTC m=+148.676169089" watchObservedRunningTime="2025-11-24 13:20:36.255707172 +0000 UTC m=+148.694831672"
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.256020 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2l986" podStartSLOduration=123.256010369 podStartE2EDuration="2m3.256010369s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:36.253393591 +0000 UTC m=+148.692518091" watchObservedRunningTime="2025-11-24 13:20:36.256010369 +0000 UTC m=+148.695134869"
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.281310 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ndmnz" podStartSLOduration=123.281280001 podStartE2EDuration="2m3.281280001s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:36.27777378 +0000 UTC m=+148.716898280" watchObservedRunningTime="2025-11-24 13:20:36.281280001 +0000 UTC m=+148.720404501"
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.344884 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:36 crc kubenswrapper[5039]: E1124 13:20:36.345091 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:36.845061497 +0000 UTC m=+149.284185997 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.345177 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:36 crc kubenswrapper[5039]: E1124 13:20:36.345583 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:36.84557568 +0000 UTC m=+149.284700180 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.446650 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:36 crc kubenswrapper[5039]: E1124 13:20:36.446915 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:36.946858123 +0000 UTC m=+149.385982623 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.447179 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:36 crc kubenswrapper[5039]: E1124 13:20:36.447530 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:36.94749196 +0000 UTC m=+149.386616450 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.548426 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:36 crc kubenswrapper[5039]: E1124 13:20:36.548753 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.048708711 +0000 UTC m=+149.487833211 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
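The pod_startup_latency_tracker entries above report podStartSLOduration as the time from podCreationTimestamp to the observed running time, minus time spent pulling images; here firstStartedPulling/lastFinishedPulling are the zero time, so the SLO duration equals the end-to-end duration (for example 2m3.255707172s = 123.255707172s for the openshift-apiserver pod). A small sketch of that arithmetic, simplified from what the kubelet tracker logs (not the tracker itself):

package main

import (
	"fmt"
	"time"
)

// Reproduce the podStartSLOduration arithmetic using the
// openshift-apiserver values from the log above: creation at 13:18:33,
// observed running (watch time) at 13:20:36.255707172, no image pulls.
func main() {
	creation, _ := time.Parse(time.RFC3339, "2025-11-24T13:18:33Z")
	observed, _ := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST",
		"2025-11-24 13:20:36.255707172 +0000 UTC")

	// firstStartedPulling/lastFinishedPulling are the zero time in the
	// log, so pulling contributes nothing to the duration.
	var pullDuration time.Duration

	slo := observed.Sub(creation) - pullDuration
	fmt.Println("podStartSLOduration:", slo.Seconds()) // 123.255707172
}
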
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.548957 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:36 crc kubenswrapper[5039]: E1124 13:20:36.549381 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.049362497 +0000 UTC m=+149.488486997 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.650239 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:36 crc kubenswrapper[5039]: E1124 13:20:36.650541 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.150478367 +0000 UTC m=+149.589602867 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.651073 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:36 crc kubenswrapper[5039]: E1124 13:20:36.651647 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.151635826 +0000 UTC m=+149.590760506 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.752649 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:36 crc kubenswrapper[5039]: E1124 13:20:36.752974 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.252943801 +0000 UTC m=+149.692068311 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.753142 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:36 crc kubenswrapper[5039]: E1124 13:20:36.753491 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.253480354 +0000 UTC m=+149.692604854 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.853994 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:36 crc kubenswrapper[5039]: E1124 13:20:36.854154 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.354127951 +0000 UTC m=+149.793252451 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.854241 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:36 crc kubenswrapper[5039]: E1124 13:20:36.854607 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.354597103 +0000 UTC m=+149.793721613 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.909124 5039 patch_prober.go:28] interesting pod/router-default-5444994796-4hh9n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 13:20:36 crc kubenswrapper[5039]: [-]has-synced failed: reason withheld
Nov 24 13:20:36 crc kubenswrapper[5039]: [+]process-running ok
Nov 24 13:20:36 crc kubenswrapper[5039]: healthz check failed
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.909187 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4hh9n" podUID="25637e2c-a1e3-4449-a549-7b081d0c4c4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.955665 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:36 crc kubenswrapper[5039]: E1124 13:20:36.955907 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.455863826 +0000 UTC m=+149.894988326 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:36 crc kubenswrapper[5039]: I1124 13:20:36.956002 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:36 crc kubenswrapper[5039]: E1124 13:20:36.956485 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.456464811 +0000 UTC m=+149.895589311 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.057217 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:37 crc kubenswrapper[5039]: E1124 13:20:37.057365 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.557331144 +0000 UTC m=+149.996455644 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
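Each failed mount or unmount above is immediately followed by a nestedpendingoperations line arming a per-operation deadline ("No retries permitted until ..."), which is why the same error pair recurs on a steady 500ms cadence instead of spinning in a tight loop. A compact illustration of that gating pattern (assumption: a simplified stand-in for the pattern, not kubelet's actual nestedpendingoperations):

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// retryGate remembers, per operation key, the earliest time the
// operation may run again, mirroring the "No retries permitted until"
// messages above. Illustrative only.
type retryGate struct {
	mu        sync.Mutex
	notBefore map[string]time.Time
}

func newRetryGate() *retryGate {
	return &retryGate{notBefore: make(map[string]time.Time)}
}

// run executes op unless the key is still inside its backoff window; on
// failure it arms the window again.
func (g *retryGate) run(key string, backoff time.Duration, op func() error) error {
	g.mu.Lock()
	if until, ok := g.notBefore[key]; ok && time.Now().Before(until) {
		g.mu.Unlock()
		return fmt.Errorf("no retries permitted until %s", until.Format(time.RFC3339Nano))
	}
	g.mu.Unlock()

	if err := op(); err != nil {
		g.mu.Lock()
		g.notBefore[key] = time.Now().Add(backoff)
		g.mu.Unlock()
		return err
	}
	return nil
}

func main() {
	gate := newRetryGate()
	mount := func() error {
		return errors.New("driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers")
	}
	key := "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8"
	fmt.Println(gate.run(key, 500*time.Millisecond, mount)) // fails, arms a 500ms window
	fmt.Println(gate.run(key, 500*time.Millisecond, mount)) // rejected: still inside the window
}
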
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.057587 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:37 crc kubenswrapper[5039]: E1124 13:20:37.057973 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.557962271 +0000 UTC m=+149.997086831 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.096883 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2" event={"ID":"32c545f6-2f66-4212-a7d0-01eab2f40da7","Type":"ContainerStarted","Data":"a4f5c6eda3a188d444f41ebd19a2da38f032d36acb967a94d92876eaf56082af"}
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.097654 5039 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-g5ldp container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.42:5443/healthz\": dial tcp 10.217.0.42:5443: connect: connection refused" start-of-body=
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.097710 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" podUID="988022f9-58e3-429b-9940-657283113440" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.42:5443/healthz\": dial tcp 10.217.0.42:5443: connect: connection refused"
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.097742 5039 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-5q4db container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body=
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.097764 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db" podUID="17a9ee7f-16c3-40ce-b614-a30cee1e8d83" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused"
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.098851 5039 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-k884h container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.30:8080/healthz\": dial tcp 10.217.0.30:8080: connect: connection refused" start-of-body=
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.098941 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-k884h" podUID="4637ec55-c9ee-48a4-9351-6a382efe4c91" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.30:8080/healthz\": dial tcp 10.217.0.30:8080: connect: connection refused"
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.120722 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2" podStartSLOduration=124.120702349 podStartE2EDuration="2m4.120702349s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:37.119305632 +0000 UTC m=+149.558430132" watchObservedRunningTime="2025-11-24 13:20:37.120702349 +0000 UTC m=+149.559826849"
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.121809 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-k4227" podStartSLOduration=124.121803827 podStartE2EDuration="2m4.121803827s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:36.311384217 +0000 UTC m=+148.750508707" watchObservedRunningTime="2025-11-24 13:20:37.121803827 +0000 UTC m=+149.560928327"
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.159035 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:37 crc kubenswrapper[5039]: E1124 13:20:37.159165 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.659147771 +0000 UTC m=+150.098272281 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.159194 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:37 crc kubenswrapper[5039]: E1124 13:20:37.161313 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.661299686 +0000 UTC m=+150.100424396 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.260348 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.260554 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.260610 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.260705 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 13:20:37 crc kubenswrapper[5039]: E1124 13:20:37.260810 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.760721761 +0000 UTC m=+150.199846261 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.260903 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.264350 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.267886 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.269280 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.269923 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.320081 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.326296 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.362205 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:37 crc kubenswrapper[5039]: E1124 13:20:37.362605 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.86258762 +0000 UTC m=+150.301712120 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.468291 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:37 crc kubenswrapper[5039]: E1124 13:20:37.468476 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.968452532 +0000 UTC m=+150.407577042 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.469216 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:37 crc kubenswrapper[5039]: E1124 13:20:37.469774 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:37.969763345 +0000 UTC m=+150.408887845 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.526278 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.581949 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:37 crc kubenswrapper[5039]: E1124 13:20:37.582223 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:38.082175575 +0000 UTC m=+150.521300075 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.582577 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:37 crc kubenswrapper[5039]: E1124 13:20:37.582914 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:38.082897015 +0000 UTC m=+150.522021525 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.683952 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 13:20:37 crc kubenswrapper[5039]: E1124 13:20:37.684687 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:38.18466389 +0000 UTC m=+150.623788390 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.787808 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx"
Nov 24 13:20:37 crc kubenswrapper[5039]: E1124 13:20:37.788373 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:38.288357335 +0000 UTC m=+150.727481835 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.894084 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:37 crc kubenswrapper[5039]: E1124 13:20:37.895136 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:38.39510491 +0000 UTC m=+150.834229420 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.910020 5039 patch_prober.go:28] interesting pod/router-default-5444994796-4hh9n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 13:20:37 crc kubenswrapper[5039]: [-]has-synced failed: reason withheld Nov 24 13:20:37 crc kubenswrapper[5039]: [+]process-running ok Nov 24 13:20:37 crc kubenswrapper[5039]: healthz check failed Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.910099 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4hh9n" podUID="25637e2c-a1e3-4449-a549-7b081d0c4c4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 13:20:37 crc kubenswrapper[5039]: I1124 13:20:37.996380 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:37 crc kubenswrapper[5039]: E1124 13:20:37.996925 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:38.496909976 +0000 UTC m=+150.936034476 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.097356 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:38 crc kubenswrapper[5039]: E1124 13:20:38.097519 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:38.597480071 +0000 UTC m=+151.036604591 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.097572 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:38 crc kubenswrapper[5039]: E1124 13:20:38.097836 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:38.59782878 +0000 UTC m=+151.036953280 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.110327 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"3a8639f44da1355af9d7bd9bbebf06c46345f6fecbb9011cbebe44a3c6285f29"} Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.110370 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"d48ac4b618956fc1759136d16bc3fd333c7621de57911b7aca3711a5e0507c31"} Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.115953 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" event={"ID":"21cbf909-e064-465c-bb64-5b0d5c82d691","Type":"ContainerStarted","Data":"4765c488e52b14bdb16f3ae03151c8848b18b36d717f0480eb24d68e61d4a2bf"} Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.119544 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"69281c2ea92f7a88c1993efac1f90357469978f5cbe9766f5c7a30ffa699ba8c"} Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.121812 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"b74002bb130d5c18e9fbaed64e34b6c3b9fc84a9b60c6c90976727cfbbd2c04a"} Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.121849 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2" Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.198589 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:38 crc kubenswrapper[5039]: E1124 13:20:38.198765 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:38.698728483 +0000 UTC m=+151.137852983 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.199022 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:38 crc kubenswrapper[5039]: E1124 13:20:38.199977 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:38.699959655 +0000 UTC m=+151.139084155 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.301000 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:38 crc kubenswrapper[5039]: E1124 13:20:38.301491 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:38.801464714 +0000 UTC m=+151.240589214 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.402699 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:38 crc kubenswrapper[5039]: E1124 13:20:38.403088 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:38.903069935 +0000 UTC m=+151.342194435 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.503764 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:38 crc kubenswrapper[5039]: E1124 13:20:38.503909 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:39.003888637 +0000 UTC m=+151.443013137 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.503961 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:38 crc kubenswrapper[5039]: E1124 13:20:38.504249 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:39.004241336 +0000 UTC m=+151.443365836 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.605534 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:38 crc kubenswrapper[5039]: E1124 13:20:38.605672 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:39.105654913 +0000 UTC m=+151.544779413 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.605827 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:38 crc kubenswrapper[5039]: E1124 13:20:38.606111 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:39.106104404 +0000 UTC m=+151.545228904 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.706442 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:38 crc kubenswrapper[5039]: E1124 13:20:38.706602 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:39.206568756 +0000 UTC m=+151.645693266 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.706683 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:38 crc kubenswrapper[5039]: E1124 13:20:38.707062 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:39.207045948 +0000 UTC m=+151.646170448 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.807585 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:38 crc kubenswrapper[5039]: E1124 13:20:38.807837 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:39.307802438 +0000 UTC m=+151.746926948 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.807904 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:38 crc kubenswrapper[5039]: E1124 13:20:38.808456 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:39.308445315 +0000 UTC m=+151.747569815 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.909774 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:38 crc kubenswrapper[5039]: E1124 13:20:38.909930 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:39.409908773 +0000 UTC m=+151.849033273 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.910039 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:38 crc kubenswrapper[5039]: E1124 13:20:38.910283 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:39.410276153 +0000 UTC m=+151.849400653 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.911404 5039 patch_prober.go:28] interesting pod/router-default-5444994796-4hh9n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 13:20:38 crc kubenswrapper[5039]: [-]has-synced failed: reason withheld Nov 24 13:20:38 crc kubenswrapper[5039]: [+]process-running ok Nov 24 13:20:38 crc kubenswrapper[5039]: healthz check failed Nov 24 13:20:38 crc kubenswrapper[5039]: I1124 13:20:38.911451 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4hh9n" podUID="25637e2c-a1e3-4449-a549-7b081d0c4c4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.011074 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:39 crc kubenswrapper[5039]: E1124 13:20:39.011273 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:39.511247248 +0000 UTC m=+151.950371748 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.011382 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:39 crc kubenswrapper[5039]: E1124 13:20:39.011681 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:39.511668018 +0000 UTC m=+151.950792518 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.112329 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:39 crc kubenswrapper[5039]: E1124 13:20:39.112522 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:39.612478129 +0000 UTC m=+152.051602629 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.112660 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:39 crc kubenswrapper[5039]: E1124 13:20:39.112907 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:39.61289452 +0000 UTC m=+152.052019020 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.127701 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"9d0aadb5b0b330646b3d6a85f2dd06a27de0095dcdd6571dedc02a8f2b563dc0"} Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.129235 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"765bd1b99fe1b0d420e152878fd45d9d573e0c9456109e72efcab82230ac6f04"} Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.129635 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.131307 5039 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-6g9d2 container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.131358 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2" podUID="32c545f6-2f66-4212-a7d0-01eab2f40da7" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.213663 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:39 crc kubenswrapper[5039]: E1124 13:20:39.214062 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:39.71403469 +0000 UTC m=+152.153159190 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.315295 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:39 crc kubenswrapper[5039]: E1124 13:20:39.315600 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:39.81558859 +0000 UTC m=+152.254713090 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.401189 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9979w"] Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.402039 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9979w" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.404286 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.416319 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:39 crc kubenswrapper[5039]: E1124 13:20:39.416433 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:39.916416861 +0000 UTC m=+152.355541351 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.416625 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqv77\" (UniqueName: \"kubernetes.io/projected/aab10f23-6223-4554-9a20-3669e7e0eb72-kube-api-access-rqv77\") pod \"community-operators-9979w\" (UID: \"aab10f23-6223-4554-9a20-3669e7e0eb72\") " pod="openshift-marketplace/community-operators-9979w" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.416673 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.416722 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aab10f23-6223-4554-9a20-3669e7e0eb72-catalog-content\") pod \"community-operators-9979w\" (UID: \"aab10f23-6223-4554-9a20-3669e7e0eb72\") " pod="openshift-marketplace/community-operators-9979w" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.416805 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aab10f23-6223-4554-9a20-3669e7e0eb72-utilities\") pod \"community-operators-9979w\" (UID: \"aab10f23-6223-4554-9a20-3669e7e0eb72\") " pod="openshift-marketplace/community-operators-9979w" Nov 24 13:20:39 crc kubenswrapper[5039]: E1124 13:20:39.417029 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-24 13:20:39.917020127 +0000 UTC m=+152.356144627 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.494896 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9979w"] Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.521667 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.522195 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aab10f23-6223-4554-9a20-3669e7e0eb72-utilities\") pod \"community-operators-9979w\" (UID: \"aab10f23-6223-4554-9a20-3669e7e0eb72\") " pod="openshift-marketplace/community-operators-9979w" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.522291 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqv77\" (UniqueName: \"kubernetes.io/projected/aab10f23-6223-4554-9a20-3669e7e0eb72-kube-api-access-rqv77\") pod \"community-operators-9979w\" (UID: \"aab10f23-6223-4554-9a20-3669e7e0eb72\") " pod="openshift-marketplace/community-operators-9979w" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.522368 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aab10f23-6223-4554-9a20-3669e7e0eb72-catalog-content\") pod \"community-operators-9979w\" (UID: \"aab10f23-6223-4554-9a20-3669e7e0eb72\") " pod="openshift-marketplace/community-operators-9979w" Nov 24 13:20:39 crc kubenswrapper[5039]: E1124 13:20:39.522531 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:40.022484248 +0000 UTC m=+152.461608748 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.523039 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aab10f23-6223-4554-9a20-3669e7e0eb72-utilities\") pod \"community-operators-9979w\" (UID: \"aab10f23-6223-4554-9a20-3669e7e0eb72\") " pod="openshift-marketplace/community-operators-9979w" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.523364 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aab10f23-6223-4554-9a20-3669e7e0eb72-catalog-content\") pod \"community-operators-9979w\" (UID: \"aab10f23-6223-4554-9a20-3669e7e0eb72\") " pod="openshift-marketplace/community-operators-9979w" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.563982 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqv77\" (UniqueName: \"kubernetes.io/projected/aab10f23-6223-4554-9a20-3669e7e0eb72-kube-api-access-rqv77\") pod \"community-operators-9979w\" (UID: \"aab10f23-6223-4554-9a20-3669e7e0eb72\") " pod="openshift-marketplace/community-operators-9979w" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.586812 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-76mnr"] Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.588064 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-76mnr" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.593815 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.602434 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-76mnr"] Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.624155 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ph79\" (UniqueName: \"kubernetes.io/projected/e7514bf7-ccdc-42f0-a159-78d12f91e55c-kube-api-access-4ph79\") pod \"certified-operators-76mnr\" (UID: \"e7514bf7-ccdc-42f0-a159-78d12f91e55c\") " pod="openshift-marketplace/certified-operators-76mnr" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.624215 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.624251 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7514bf7-ccdc-42f0-a159-78d12f91e55c-utilities\") pod \"certified-operators-76mnr\" (UID: \"e7514bf7-ccdc-42f0-a159-78d12f91e55c\") " pod="openshift-marketplace/certified-operators-76mnr" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.624267 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7514bf7-ccdc-42f0-a159-78d12f91e55c-catalog-content\") pod \"certified-operators-76mnr\" (UID: \"e7514bf7-ccdc-42f0-a159-78d12f91e55c\") " pod="openshift-marketplace/certified-operators-76mnr" Nov 24 13:20:39 crc kubenswrapper[5039]: E1124 13:20:39.624576 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:40.124565302 +0000 UTC m=+152.563689802 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.725826 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9979w" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.725924 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:39 crc kubenswrapper[5039]: E1124 13:20:39.726108 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:40.226086471 +0000 UTC m=+152.665210971 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.726237 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ph79\" (UniqueName: \"kubernetes.io/projected/e7514bf7-ccdc-42f0-a159-78d12f91e55c-kube-api-access-4ph79\") pod \"certified-operators-76mnr\" (UID: \"e7514bf7-ccdc-42f0-a159-78d12f91e55c\") " pod="openshift-marketplace/certified-operators-76mnr" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.726323 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.726394 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7514bf7-ccdc-42f0-a159-78d12f91e55c-utilities\") pod \"certified-operators-76mnr\" (UID: \"e7514bf7-ccdc-42f0-a159-78d12f91e55c\") " pod="openshift-marketplace/certified-operators-76mnr" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.726414 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7514bf7-ccdc-42f0-a159-78d12f91e55c-catalog-content\") pod \"certified-operators-76mnr\" (UID: \"e7514bf7-ccdc-42f0-a159-78d12f91e55c\") " pod="openshift-marketplace/certified-operators-76mnr" Nov 24 13:20:39 crc kubenswrapper[5039]: E1124 13:20:39.726689 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:40.226677317 +0000 UTC m=+152.665802007 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.726963 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7514bf7-ccdc-42f0-a159-78d12f91e55c-utilities\") pod \"certified-operators-76mnr\" (UID: \"e7514bf7-ccdc-42f0-a159-78d12f91e55c\") " pod="openshift-marketplace/certified-operators-76mnr" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.727041 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7514bf7-ccdc-42f0-a159-78d12f91e55c-catalog-content\") pod \"certified-operators-76mnr\" (UID: \"e7514bf7-ccdc-42f0-a159-78d12f91e55c\") " pod="openshift-marketplace/certified-operators-76mnr" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.767157 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ph79\" (UniqueName: \"kubernetes.io/projected/e7514bf7-ccdc-42f0-a159-78d12f91e55c-kube-api-access-4ph79\") pod \"certified-operators-76mnr\" (UID: \"e7514bf7-ccdc-42f0-a159-78d12f91e55c\") " pod="openshift-marketplace/certified-operators-76mnr" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.785186 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tdpn4"] Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.786404 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tdpn4" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.805258 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tdpn4"] Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.810482 5039 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-6g9d2 container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.810564 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2" podUID="32c545f6-2f66-4212-a7d0-01eab2f40da7" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.810893 5039 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-6g9d2 container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.810921 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2" podUID="32c545f6-2f66-4212-a7d0-01eab2f40da7" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.828891 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.829157 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30fffdbb-286b-47ad-887b-fd8ec67725d6-utilities\") pod \"community-operators-tdpn4\" (UID: \"30fffdbb-286b-47ad-887b-fd8ec67725d6\") " pod="openshift-marketplace/community-operators-tdpn4" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.829197 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v82zn\" (UniqueName: \"kubernetes.io/projected/30fffdbb-286b-47ad-887b-fd8ec67725d6-kube-api-access-v82zn\") pod \"community-operators-tdpn4\" (UID: \"30fffdbb-286b-47ad-887b-fd8ec67725d6\") " pod="openshift-marketplace/community-operators-tdpn4" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.829248 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30fffdbb-286b-47ad-887b-fd8ec67725d6-catalog-content\") pod \"community-operators-tdpn4\" (UID: \"30fffdbb-286b-47ad-887b-fd8ec67725d6\") " pod="openshift-marketplace/community-operators-tdpn4" Nov 24 13:20:39 crc kubenswrapper[5039]: E1124 13:20:39.829361 5039 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:40.329346235 +0000 UTC m=+152.768470725 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.846626 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.847225 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.852992 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.853190 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.863677 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.909045 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-76mnr" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.917659 5039 patch_prober.go:28] interesting pod/router-default-5444994796-4hh9n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 13:20:39 crc kubenswrapper[5039]: [-]has-synced failed: reason withheld Nov 24 13:20:39 crc kubenswrapper[5039]: [+]process-running ok Nov 24 13:20:39 crc kubenswrapper[5039]: healthz check failed Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.917714 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4hh9n" podUID="25637e2c-a1e3-4449-a549-7b081d0c4c4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.931647 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30fffdbb-286b-47ad-887b-fd8ec67725d6-utilities\") pod \"community-operators-tdpn4\" (UID: \"30fffdbb-286b-47ad-887b-fd8ec67725d6\") " pod="openshift-marketplace/community-operators-tdpn4" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.931695 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.931728 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/69a0e706-e6ab-4dee-bffa-5dd23c530205-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"69a0e706-e6ab-4dee-bffa-5dd23c530205\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.931752 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v82zn\" (UniqueName: \"kubernetes.io/projected/30fffdbb-286b-47ad-887b-fd8ec67725d6-kube-api-access-v82zn\") pod \"community-operators-tdpn4\" (UID: \"30fffdbb-286b-47ad-887b-fd8ec67725d6\") " pod="openshift-marketplace/community-operators-tdpn4" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.931769 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/69a0e706-e6ab-4dee-bffa-5dd23c530205-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"69a0e706-e6ab-4dee-bffa-5dd23c530205\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.931825 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30fffdbb-286b-47ad-887b-fd8ec67725d6-catalog-content\") pod \"community-operators-tdpn4\" (UID: \"30fffdbb-286b-47ad-887b-fd8ec67725d6\") " pod="openshift-marketplace/community-operators-tdpn4" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.932299 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/30fffdbb-286b-47ad-887b-fd8ec67725d6-catalog-content\") pod \"community-operators-tdpn4\" (UID: \"30fffdbb-286b-47ad-887b-fd8ec67725d6\") " pod="openshift-marketplace/community-operators-tdpn4" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.932541 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30fffdbb-286b-47ad-887b-fd8ec67725d6-utilities\") pod \"community-operators-tdpn4\" (UID: \"30fffdbb-286b-47ad-887b-fd8ec67725d6\") " pod="openshift-marketplace/community-operators-tdpn4" Nov 24 13:20:39 crc kubenswrapper[5039]: E1124 13:20:39.932838 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:40.432825835 +0000 UTC m=+152.871950335 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.960429 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v82zn\" (UniqueName: \"kubernetes.io/projected/30fffdbb-286b-47ad-887b-fd8ec67725d6-kube-api-access-v82zn\") pod \"community-operators-tdpn4\" (UID: \"30fffdbb-286b-47ad-887b-fd8ec67725d6\") " pod="openshift-marketplace/community-operators-tdpn4" Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.985532 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hvqj8"] Nov 24 13:20:39 crc kubenswrapper[5039]: I1124 13:20:39.993786 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hvqj8" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.009213 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hvqj8"] Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.033016 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.033452 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e77b6e7-8d78-4d0d-ab33-c27c4d168b74-catalog-content\") pod \"certified-operators-hvqj8\" (UID: \"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74\") " pod="openshift-marketplace/certified-operators-hvqj8" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.033566 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e77b6e7-8d78-4d0d-ab33-c27c4d168b74-utilities\") pod \"certified-operators-hvqj8\" (UID: \"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74\") " pod="openshift-marketplace/certified-operators-hvqj8" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.033599 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/69a0e706-e6ab-4dee-bffa-5dd23c530205-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"69a0e706-e6ab-4dee-bffa-5dd23c530205\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.033624 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/69a0e706-e6ab-4dee-bffa-5dd23c530205-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"69a0e706-e6ab-4dee-bffa-5dd23c530205\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.033688 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gz8gz\" (UniqueName: \"kubernetes.io/projected/8e77b6e7-8d78-4d0d-ab33-c27c4d168b74-kube-api-access-gz8gz\") pod \"certified-operators-hvqj8\" (UID: \"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74\") " pod="openshift-marketplace/certified-operators-hvqj8" Nov 24 13:20:40 crc kubenswrapper[5039]: E1124 13:20:40.033856 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:40.533826911 +0000 UTC m=+152.972951411 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.034080 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/69a0e706-e6ab-4dee-bffa-5dd23c530205-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"69a0e706-e6ab-4dee-bffa-5dd23c530205\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.077719 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/69a0e706-e6ab-4dee-bffa-5dd23c530205-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"69a0e706-e6ab-4dee-bffa-5dd23c530205\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.119393 5039 patch_prober.go:28] interesting pod/downloads-7954f5f757-98dp9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.119479 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-98dp9" podUID="c7032d1d-5aae-4e50-b10f-3df40a0cd983" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.119998 5039 patch_prober.go:28] interesting pod/downloads-7954f5f757-98dp9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.120027 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-98dp9" podUID="c7032d1d-5aae-4e50-b10f-3df40a0cd983" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.123787 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tdpn4" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.140186 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gz8gz\" (UniqueName: \"kubernetes.io/projected/8e77b6e7-8d78-4d0d-ab33-c27c4d168b74-kube-api-access-gz8gz\") pod \"certified-operators-hvqj8\" (UID: \"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74\") " pod="openshift-marketplace/certified-operators-hvqj8" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.140287 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e77b6e7-8d78-4d0d-ab33-c27c4d168b74-catalog-content\") pod \"certified-operators-hvqj8\" (UID: \"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74\") " pod="openshift-marketplace/certified-operators-hvqj8" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.140356 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e77b6e7-8d78-4d0d-ab33-c27c4d168b74-utilities\") pod \"certified-operators-hvqj8\" (UID: \"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74\") " pod="openshift-marketplace/certified-operators-hvqj8" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.140378 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:40 crc kubenswrapper[5039]: E1124 13:20:40.140993 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:40.640975296 +0000 UTC m=+153.080099796 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.141900 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e77b6e7-8d78-4d0d-ab33-c27c4d168b74-catalog-content\") pod \"certified-operators-hvqj8\" (UID: \"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74\") " pod="openshift-marketplace/certified-operators-hvqj8" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.142095 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e77b6e7-8d78-4d0d-ab33-c27c4d168b74-utilities\") pod \"certified-operators-hvqj8\" (UID: \"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74\") " pod="openshift-marketplace/certified-operators-hvqj8" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.180330 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gz8gz\" (UniqueName: \"kubernetes.io/projected/8e77b6e7-8d78-4d0d-ab33-c27c4d168b74-kube-api-access-gz8gz\") pod \"certified-operators-hvqj8\" (UID: \"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74\") " pod="openshift-marketplace/certified-operators-hvqj8" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.181518 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.259203 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:40 crc kubenswrapper[5039]: E1124 13:20:40.259967 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:40.759938775 +0000 UTC m=+153.199063275 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.260019 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:40 crc kubenswrapper[5039]: E1124 13:20:40.261204 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:40.761191287 +0000 UTC m=+153.200315787 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.261856 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9979w"] Nov 24 13:20:40 crc kubenswrapper[5039]: W1124 13:20:40.290792 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaab10f23_6223_4554_9a20_3669e7e0eb72.slice/crio-0f7a3ec88cf752e692357c2bbb7d799fbd93d64f6c04a44635a7fb3a222f26e0 WatchSource:0}: Error finding container 0f7a3ec88cf752e692357c2bbb7d799fbd93d64f6c04a44635a7fb3a222f26e0: Status 404 returned error can't find the container with id 0f7a3ec88cf752e692357c2bbb7d799fbd93d64f6c04a44635a7fb3a222f26e0 Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.328038 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.340882 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hvqj8" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.366006 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:40 crc kubenswrapper[5039]: E1124 13:20:40.366409 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-24 13:20:40.866390902 +0000 UTC m=+153.305515402 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.397828 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.397866 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.434752 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-c886j" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.467543 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:40 crc kubenswrapper[5039]: E1124 13:20:40.469247 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:40.969232455 +0000 UTC m=+153.408356955 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.574144 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:40 crc kubenswrapper[5039]: E1124 13:20:40.575628 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:41.07560998 +0000 UTC m=+153.514734470 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.599822 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-76mnr"] Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.652791 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tdpn4"] Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.654535 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.674736 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6l58p" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.676252 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:40 crc kubenswrapper[5039]: E1124 13:20:40.677876 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:41.177851228 +0000 UTC m=+153.616975728 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:40 crc kubenswrapper[5039]: W1124 13:20:40.700082 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod30fffdbb_286b_47ad_887b_fd8ec67725d6.slice/crio-4ea77a08f8f662011b569ae6898b5b274584171a3a495cf723c3544630da7f3c WatchSource:0}: Error finding container 4ea77a08f8f662011b569ae6898b5b274584171a3a495cf723c3544630da7f3c: Status 404 returned error can't find the container with id 4ea77a08f8f662011b569ae6898b5b274584171a3a495cf723c3544630da7f3c Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.778174 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:40 crc kubenswrapper[5039]: E1124 13:20:40.778761 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:41.278740461 +0000 UTC m=+153.717864961 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.823732 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.842125 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.851229 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.871589 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.881209 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:40 crc kubenswrapper[5039]: E1124 13:20:40.884903 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:41.38488652 +0000 UTC m=+153.824011020 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.904869 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-4hh9n" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.916404 5039 patch_prober.go:28] interesting pod/router-default-5444994796-4hh9n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 13:20:40 crc kubenswrapper[5039]: [-]has-synced failed: reason withheld Nov 24 13:20:40 crc kubenswrapper[5039]: [+]process-running ok Nov 24 13:20:40 crc kubenswrapper[5039]: healthz check failed Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.916476 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4hh9n" podUID="25637e2c-a1e3-4449-a549-7b081d0c4c4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.994307 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:40 crc kubenswrapper[5039]: E1124 13:20:40.995779 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:41.495731809 +0000 UTC m=+153.934856319 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:40 crc kubenswrapper[5039]: I1124 13:20:40.997780 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:40 crc kubenswrapper[5039]: E1124 13:20:40.998287 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:41.498272985 +0000 UTC m=+153.937397475 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.063296 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hvqj8"] Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.099113 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:41 crc kubenswrapper[5039]: E1124 13:20:41.099903 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:41.599882536 +0000 UTC m=+154.039007036 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.194341 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"69a0e706-e6ab-4dee-bffa-5dd23c530205","Type":"ContainerStarted","Data":"d0068b8e54890651f99e46b96d1525dee75e76b6f7617121ba5a929ba110f84f"} Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.200133 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:41 crc kubenswrapper[5039]: E1124 13:20:41.200456 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:41.700444151 +0000 UTC m=+154.139568651 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.202412 5039 generic.go:334] "Generic (PLEG): container finished" podID="30fffdbb-286b-47ad-887b-fd8ec67725d6" containerID="f7455a1b83d13bec7c15b846eaa8bda46a116b0629ddc5943c31c825eec0b7eb" exitCode=0 Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.202479 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdpn4" event={"ID":"30fffdbb-286b-47ad-887b-fd8ec67725d6","Type":"ContainerDied","Data":"f7455a1b83d13bec7c15b846eaa8bda46a116b0629ddc5943c31c825eec0b7eb"} Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.202524 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdpn4" event={"ID":"30fffdbb-286b-47ad-887b-fd8ec67725d6","Type":"ContainerStarted","Data":"4ea77a08f8f662011b569ae6898b5b274584171a3a495cf723c3544630da7f3c"} Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.208162 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.224834 5039 generic.go:334] "Generic (PLEG): container finished" podID="e7514bf7-ccdc-42f0-a159-78d12f91e55c" containerID="ba208ecf6e5091a3f157925fa9989cbeec02603e1eddc4baffe0864fb1664480" exitCode=0 Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.225126 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-76mnr" event={"ID":"e7514bf7-ccdc-42f0-a159-78d12f91e55c","Type":"ContainerDied","Data":"ba208ecf6e5091a3f157925fa9989cbeec02603e1eddc4baffe0864fb1664480"} Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.225184 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-76mnr" event={"ID":"e7514bf7-ccdc-42f0-a159-78d12f91e55c","Type":"ContainerStarted","Data":"a74172e25633ec87467e3ee86841d0d32b9178d6940c4de9931da9cdf2af68ba"} Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.250229 5039 generic.go:334] "Generic (PLEG): container finished" podID="aab10f23-6223-4554-9a20-3669e7e0eb72" containerID="76eaa4d8419a259653cb282709ab09c84a27c5cc1ade28c3358a7bfd86c6820a" exitCode=0 Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.250398 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9979w" event={"ID":"aab10f23-6223-4554-9a20-3669e7e0eb72","Type":"ContainerDied","Data":"76eaa4d8419a259653cb282709ab09c84a27c5cc1ade28c3358a7bfd86c6820a"} Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.250439 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9979w" event={"ID":"aab10f23-6223-4554-9a20-3669e7e0eb72","Type":"ContainerStarted","Data":"0f7a3ec88cf752e692357c2bbb7d799fbd93d64f6c04a44635a7fb3a222f26e0"} Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.282468 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hvqj8" 
event={"ID":"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74","Type":"ContainerStarted","Data":"6f21ef6000a028a0df48bc3322c7e896cdbf303ea9b5f0b2300034efd9e1af65"} Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.302180 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:41 crc kubenswrapper[5039]: E1124 13:20:41.303588 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:41.803570892 +0000 UTC m=+154.242695392 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.349761 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bmdnh" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.400656 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.401029 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.402606 5039 patch_prober.go:28] interesting pod/console-f9d7485db-zqgfl container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.22:8443/health\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.402636 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-zqgfl" podUID="663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2" containerName="console" probeResult="failure" output="Get \"https://10.217.0.22:8443/health\": dial tcp 10.217.0.22:8443: connect: connection refused" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.403415 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:41 crc kubenswrapper[5039]: E1124 13:20:41.405611 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:41.905597165 +0000 UTC m=+154.344721665 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.412426 5039 patch_prober.go:28] interesting pod/apiserver-76f77b778f-9jvgw container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 24 13:20:41 crc kubenswrapper[5039]: [+]log ok Nov 24 13:20:41 crc kubenswrapper[5039]: [+]etcd ok Nov 24 13:20:41 crc kubenswrapper[5039]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 24 13:20:41 crc kubenswrapper[5039]: [+]poststarthook/generic-apiserver-start-informers ok Nov 24 13:20:41 crc kubenswrapper[5039]: [+]poststarthook/max-in-flight-filter ok Nov 24 13:20:41 crc kubenswrapper[5039]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 24 13:20:41 crc kubenswrapper[5039]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 24 13:20:41 crc kubenswrapper[5039]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Nov 24 13:20:41 crc kubenswrapper[5039]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Nov 24 13:20:41 crc kubenswrapper[5039]: [+]poststarthook/project.openshift.io-projectcache ok Nov 24 13:20:41 crc kubenswrapper[5039]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 24 13:20:41 crc kubenswrapper[5039]: [+]poststarthook/openshift.io-startinformers ok Nov 24 13:20:41 crc kubenswrapper[5039]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 24 13:20:41 crc kubenswrapper[5039]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 24 13:20:41 crc kubenswrapper[5039]: livez check failed Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.412465 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" podUID="bc7f5002-5906-428a-bb9e-c3507cc151c8" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.506876 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:41 crc kubenswrapper[5039]: E1124 13:20:41.507031 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:42.007003941 +0000 UTC m=+154.446128441 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.507191 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:41 crc kubenswrapper[5039]: E1124 13:20:41.515807 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:42.015787268 +0000 UTC m=+154.454911758 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.574590 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-cd9cq"] Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.575541 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cd9cq" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.577013 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.582413 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-k884h" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.597058 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cd9cq"] Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.608003 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:41 crc kubenswrapper[5039]: E1124 13:20:41.608288 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:42.108274954 +0000 UTC m=+154.547399454 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.609029 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5q4db" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.633032 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-g5ldp" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.709697 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37739944-4511-4fe7-95df-09d42974532e-utilities\") pod \"redhat-marketplace-cd9cq\" (UID: \"37739944-4511-4fe7-95df-09d42974532e\") " pod="openshift-marketplace/redhat-marketplace-cd9cq" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.709783 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9q8wx\" (UniqueName: \"kubernetes.io/projected/37739944-4511-4fe7-95df-09d42974532e-kube-api-access-9q8wx\") pod \"redhat-marketplace-cd9cq\" (UID: \"37739944-4511-4fe7-95df-09d42974532e\") " pod="openshift-marketplace/redhat-marketplace-cd9cq" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.709820 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37739944-4511-4fe7-95df-09d42974532e-catalog-content\") pod \"redhat-marketplace-cd9cq\" (UID: \"37739944-4511-4fe7-95df-09d42974532e\") " pod="openshift-marketplace/redhat-marketplace-cd9cq" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.709851 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:41 crc kubenswrapper[5039]: E1124 13:20:41.710931 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:42.210904752 +0000 UTC m=+154.650029252 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.811351 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:41 crc kubenswrapper[5039]: E1124 13:20:41.811608 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:42.311572059 +0000 UTC m=+154.750696569 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.812246 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37739944-4511-4fe7-95df-09d42974532e-utilities\") pod \"redhat-marketplace-cd9cq\" (UID: \"37739944-4511-4fe7-95df-09d42974532e\") " pod="openshift-marketplace/redhat-marketplace-cd9cq" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.812295 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9q8wx\" (UniqueName: \"kubernetes.io/projected/37739944-4511-4fe7-95df-09d42974532e-kube-api-access-9q8wx\") pod \"redhat-marketplace-cd9cq\" (UID: \"37739944-4511-4fe7-95df-09d42974532e\") " pod="openshift-marketplace/redhat-marketplace-cd9cq" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.812319 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37739944-4511-4fe7-95df-09d42974532e-catalog-content\") pod \"redhat-marketplace-cd9cq\" (UID: \"37739944-4511-4fe7-95df-09d42974532e\") " pod="openshift-marketplace/redhat-marketplace-cd9cq" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.812344 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:41 crc kubenswrapper[5039]: E1124 13:20:41.812791 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:42.3127741 +0000 UTC m=+154.751898590 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.812799 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37739944-4511-4fe7-95df-09d42974532e-utilities\") pod \"redhat-marketplace-cd9cq\" (UID: \"37739944-4511-4fe7-95df-09d42974532e\") " pod="openshift-marketplace/redhat-marketplace-cd9cq" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.815769 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37739944-4511-4fe7-95df-09d42974532e-catalog-content\") pod \"redhat-marketplace-cd9cq\" (UID: \"37739944-4511-4fe7-95df-09d42974532e\") " pod="openshift-marketplace/redhat-marketplace-cd9cq" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.845861 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9q8wx\" (UniqueName: \"kubernetes.io/projected/37739944-4511-4fe7-95df-09d42974532e-kube-api-access-9q8wx\") pod \"redhat-marketplace-cd9cq\" (UID: \"37739944-4511-4fe7-95df-09d42974532e\") " pod="openshift-marketplace/redhat-marketplace-cd9cq" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.898191 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cd9cq" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.906036 5039 patch_prober.go:28] interesting pod/router-default-5444994796-4hh9n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 13:20:41 crc kubenswrapper[5039]: [-]has-synced failed: reason withheld Nov 24 13:20:41 crc kubenswrapper[5039]: [+]process-running ok Nov 24 13:20:41 crc kubenswrapper[5039]: healthz check failed Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.906108 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4hh9n" podUID="25637e2c-a1e3-4449-a549-7b081d0c4c4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.913872 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:41 crc kubenswrapper[5039]: E1124 13:20:41.914133 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-24 13:20:42.414104655 +0000 UTC m=+154.853229155 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.914466 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:41 crc kubenswrapper[5039]: E1124 13:20:41.914839 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:42.414831144 +0000 UTC m=+154.853955644 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.982918 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-gj66q"] Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.983804 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gj66q" Nov 24 13:20:41 crc kubenswrapper[5039]: I1124 13:20:41.991213 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gj66q"] Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.018394 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:42 crc kubenswrapper[5039]: E1124 13:20:42.019081 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:42.519060953 +0000 UTC m=+154.958185463 (durationBeforeRetry 500ms). 
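The router-default startup probe failures threaded through this window (probeResult="failure", HTTP 500) carry an aggregated healthz body: each sub-check contributes a [+] or [-] line ([-]backend-http, [-]has-synced, [+]process-running), and any single failing check turns the whole response into a 500, which the kubelet then reports verbatim as the probe output. A hedged sketch of such a composite health endpoint (handler names, wiring, and the port are mine, not the router's actual code):

    package main

    import (
    	"fmt"
    	"net/http"
    )

    type check struct {
    	name string
    	fn   func() error
    }

    // healthzHandler aggregates named checks into one [+]/[-] report and
    // returns 500 if any check fails, matching the probe output format above.
    func healthzHandler(checks []check) http.HandlerFunc {
    	return func(w http.ResponseWriter, r *http.Request) {
    		failed := false
    		body := ""
    		for _, c := range checks {
    			if err := c.fn(); err != nil {
    				failed = true
    				body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
    			} else {
    				body += fmt.Sprintf("[+]%s ok\n", c.name)
    			}
    		}
    		if failed {
    			w.WriteHeader(http.StatusInternalServerError) // probe sees 500
    			body += "healthz check failed\n"
    		} else {
    			w.WriteHeader(http.StatusOK)
    		}
    		fmt.Fprint(w, body)
    	}
    }

    func main() {
    	checks := []check{
    		{"backend-http", func() error { return fmt.Errorf("not ready") }},
    		{"has-synced", func() error { return fmt.Errorf("not ready") }},
    		{"process-running", func() error { return nil }},
    	}
    	http.HandleFunc("/healthz", healthzHandler(checks))
    	_ = http.ListenAndServe(":8080", nil) // arbitrary port for the sketch
    }

Once backend-http and has-synced pass, the endpoint flips to 200 and the startup probe stops failing.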
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.121453 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kr676\" (UniqueName: \"kubernetes.io/projected/6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0-kube-api-access-kr676\") pod \"redhat-marketplace-gj66q\" (UID: \"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0\") " pod="openshift-marketplace/redhat-marketplace-gj66q" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.121694 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0-utilities\") pod \"redhat-marketplace-gj66q\" (UID: \"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0\") " pod="openshift-marketplace/redhat-marketplace-gj66q" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.121723 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0-catalog-content\") pod \"redhat-marketplace-gj66q\" (UID: \"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0\") " pod="openshift-marketplace/redhat-marketplace-gj66q" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.121751 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:42 crc kubenswrapper[5039]: E1124 13:20:42.122046 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:42.62203497 +0000 UTC m=+155.061159470 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.128710 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cd9cq"] Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.224444 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:42 crc kubenswrapper[5039]: E1124 13:20:42.224663 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:42.724631597 +0000 UTC m=+155.163756097 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.224750 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0-utilities\") pod \"redhat-marketplace-gj66q\" (UID: \"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0\") " pod="openshift-marketplace/redhat-marketplace-gj66q" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.224786 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0-catalog-content\") pod \"redhat-marketplace-gj66q\" (UID: \"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0\") " pod="openshift-marketplace/redhat-marketplace-gj66q" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.224818 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.224908 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kr676\" (UniqueName: \"kubernetes.io/projected/6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0-kube-api-access-kr676\") pod \"redhat-marketplace-gj66q\" (UID: \"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0\") " pod="openshift-marketplace/redhat-marketplace-gj66q" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 
13:20:42.225713 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0-utilities\") pod \"redhat-marketplace-gj66q\" (UID: \"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0\") " pod="openshift-marketplace/redhat-marketplace-gj66q" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.225980 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0-catalog-content\") pod \"redhat-marketplace-gj66q\" (UID: \"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0\") " pod="openshift-marketplace/redhat-marketplace-gj66q" Nov 24 13:20:42 crc kubenswrapper[5039]: E1124 13:20:42.226219 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:42.726205817 +0000 UTC m=+155.165330317 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.244404 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kr676\" (UniqueName: \"kubernetes.io/projected/6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0-kube-api-access-kr676\") pod \"redhat-marketplace-gj66q\" (UID: \"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0\") " pod="openshift-marketplace/redhat-marketplace-gj66q" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.299240 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" event={"ID":"21cbf909-e064-465c-bb64-5b0d5c82d691","Type":"ContainerStarted","Data":"3dab9bca9bb6417fde7724d7d7efd29e6417d2cd0b00085264b2e6b44ddbf9bf"} Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.300850 5039 generic.go:334] "Generic (PLEG): container finished" podID="8e77b6e7-8d78-4d0d-ab33-c27c4d168b74" containerID="8d5096b6f4b8705d2e77a594052ed82d7a7dd5420809393dc720b4c2ca46ddae" exitCode=0 Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.300937 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hvqj8" event={"ID":"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74","Type":"ContainerDied","Data":"8d5096b6f4b8705d2e77a594052ed82d7a7dd5420809393dc720b4c2ca46ddae"} Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.317690 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"69a0e706-e6ab-4dee-bffa-5dd23c530205","Type":"ContainerStarted","Data":"c8314137cd3a0eaeba29ba9c9a25c1cdda035e35887b37605355b4d391965277"} Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.317727 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cd9cq" event={"ID":"37739944-4511-4fe7-95df-09d42974532e","Type":"ContainerStarted","Data":"69d11a45ef96500b26081bc989c146bf0de76ea6780cf430d1a5e5bc8d20f8d8"} Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.327204 5039 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.327635 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gj66q" Nov 24 13:20:42 crc kubenswrapper[5039]: E1124 13:20:42.327735 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:42.827668825 +0000 UTC m=+155.266793325 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.328048 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:42 crc kubenswrapper[5039]: E1124 13:20:42.328561 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:42.828547818 +0000 UTC m=+155.267672418 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.374095 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=3.374071253 podStartE2EDuration="3.374071253s" podCreationTimestamp="2025-11-24 13:20:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:42.37090253 +0000 UTC m=+154.810027030" watchObservedRunningTime="2025-11-24 13:20:42.374071253 +0000 UTC m=+154.813195753" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.429469 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:42 crc kubenswrapper[5039]: E1124 13:20:42.430551 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:42.930534619 +0000 UTC m=+155.369659119 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.530797 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:42 crc kubenswrapper[5039]: E1124 13:20:42.531176 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 13:20:43.031161306 +0000 UTC m=+155.470285806 (durationBeforeRetry 500ms). 
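The pod_startup_latency_tracker entry above can be checked by hand: both image-pull timestamps are the zero time (0001-01-01), so no pull interval is subtracted, and podStartSLOduration is simply the watch-observed running time minus podCreationTimestamp: 13:20:42.374071253 minus 13:20:39 equals 3.374071253s. A worked example with the timestamps copied from the log (this reproduces the arithmetic only, not kubelet's internals):

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	created, _ := time.Parse(time.RFC3339Nano, "2025-11-24T13:20:39Z")
    	observed, _ := time.Parse(time.RFC3339Nano, "2025-11-24T13:20:42.374071253Z")
    	// Prints 3.374071253s, matching podStartSLOduration in the log line.
    	fmt.Println(observed.Sub(created))
    }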
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.552068 5039 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.559657 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.560244 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.566546 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.568044 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.574939 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.586068 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-x68z7"] Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.618874 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gj66q"] Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.619732 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-x68z7" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.625064 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-x68z7"] Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.625561 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.633994 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.634544 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b572c340-7119-4771-82b7-44f475fd9c82-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"b572c340-7119-4771-82b7-44f475fd9c82\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.634587 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b572c340-7119-4771-82b7-44f475fd9c82-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"b572c340-7119-4771-82b7-44f475fd9c82\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 13:20:42 crc kubenswrapper[5039]: E1124 13:20:42.634806 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 13:20:43.134788409 +0000 UTC m=+155.573912899 (durationBeforeRetry 500ms). 
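The reconciler_common.go messages that dominate this stretch (VerifyControllerAttachedVolume, then MountVolume, then SetUp on one side; UnmountVolume on the other) come from one loop comparing two caches: the desired state of the world, fed by pods arriving over the SyncLoop, and the actual state of mounted volumes. A compressed sketch of that comparison (toy data structures and assumed names; the real operation executor runs these operations asynchronously, with the nestedpendingoperations backoff seen above):

    package main

    import "fmt"

    type volume struct{ name, pod string }

    // reconcile drives the two caches toward agreement: mount what is
    // desired but absent, unmount what is present but no longer desired.
    func reconcile(desired, actual map[string]volume) {
    	for key, v := range desired {
    		if _, mounted := actual[key]; !mounted {
    			fmt.Printf("operationExecutor.MountVolume started for volume %q pod %q\n", v.name, v.pod)
    			actual[key] = v // assume the async operation succeeds
    		}
    	}
    	for key, v := range actual {
    		if _, wanted := desired[key]; !wanted {
    			fmt.Printf("operationExecutor.UnmountVolume started for volume %q pod %q\n", v.name, v.pod)
    			delete(actual, key)
    		}
    	}
    }

    func main() {
    	desired := map[string]volume{
    		"catalog-content": {"catalog-content", "redhat-marketplace-gj66q"},
    		"utilities":       {"utilities", "redhat-marketplace-gj66q"},
    	}
    	actual := map[string]volume{
    		"pvc-657094db": {"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8", "8f668bae-612b-4b75-9490-919e737c6a3b"},
    	}
    	reconcile(desired, actual)
    }

Each pass emits exactly the mix of MountVolume and UnmountVolume lines interleaved in the log, since the old pod's PVC is still in actual state while the new pods' volumes are only in desired state.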
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.735615 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.735685 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b572c340-7119-4771-82b7-44f475fd9c82-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"b572c340-7119-4771-82b7-44f475fd9c82\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.735762 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x72vn\" (UniqueName: \"kubernetes.io/projected/af4ec023-8129-4c1b-99c4-20e814084d4a-kube-api-access-x72vn\") pod \"redhat-operators-x68z7\" (UID: \"af4ec023-8129-4c1b-99c4-20e814084d4a\") " pod="openshift-marketplace/redhat-operators-x68z7" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.735796 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b572c340-7119-4771-82b7-44f475fd9c82-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"b572c340-7119-4771-82b7-44f475fd9c82\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.735821 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af4ec023-8129-4c1b-99c4-20e814084d4a-utilities\") pod \"redhat-operators-x68z7\" (UID: \"af4ec023-8129-4c1b-99c4-20e814084d4a\") " pod="openshift-marketplace/redhat-operators-x68z7" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.735837 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af4ec023-8129-4c1b-99c4-20e814084d4a-catalog-content\") pod \"redhat-operators-x68z7\" (UID: \"af4ec023-8129-4c1b-99c4-20e814084d4a\") " pod="openshift-marketplace/redhat-operators-x68z7" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.735890 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b572c340-7119-4771-82b7-44f475fd9c82-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"b572c340-7119-4771-82b7-44f475fd9c82\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 13:20:42 crc kubenswrapper[5039]: E1124 13:20:42.736234 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-24 13:20:43.236217926 +0000 UTC m=+155.675342536 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2swfx" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.753323 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b572c340-7119-4771-82b7-44f475fd9c82-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"b572c340-7119-4771-82b7-44f475fd9c82\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.803355 5039 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-24T13:20:42.552090356Z","Handler":null,"Name":""} Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.805577 5039 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.805610 5039 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.807315 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6g9d2" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.836468 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.836841 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x72vn\" (UniqueName: \"kubernetes.io/projected/af4ec023-8129-4c1b-99c4-20e814084d4a-kube-api-access-x72vn\") pod \"redhat-operators-x68z7\" (UID: \"af4ec023-8129-4c1b-99c4-20e814084d4a\") " pod="openshift-marketplace/redhat-operators-x68z7" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.836880 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af4ec023-8129-4c1b-99c4-20e814084d4a-utilities\") pod \"redhat-operators-x68z7\" (UID: \"af4ec023-8129-4c1b-99c4-20e814084d4a\") " pod="openshift-marketplace/redhat-operators-x68z7" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.836898 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af4ec023-8129-4c1b-99c4-20e814084d4a-catalog-content\") pod \"redhat-operators-x68z7\" (UID: \"af4ec023-8129-4c1b-99c4-20e814084d4a\") " pod="openshift-marketplace/redhat-operators-x68z7" Nov 24 13:20:42 crc 
kubenswrapper[5039]: I1124 13:20:42.837741 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af4ec023-8129-4c1b-99c4-20e814084d4a-catalog-content\") pod \"redhat-operators-x68z7\" (UID: \"af4ec023-8129-4c1b-99c4-20e814084d4a\") " pod="openshift-marketplace/redhat-operators-x68z7" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.837892 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af4ec023-8129-4c1b-99c4-20e814084d4a-utilities\") pod \"redhat-operators-x68z7\" (UID: \"af4ec023-8129-4c1b-99c4-20e814084d4a\") " pod="openshift-marketplace/redhat-operators-x68z7" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.848693 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.854730 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x72vn\" (UniqueName: \"kubernetes.io/projected/af4ec023-8129-4c1b-99c4-20e814084d4a-kube-api-access-x72vn\") pod \"redhat-operators-x68z7\" (UID: \"af4ec023-8129-4c1b-99c4-20e814084d4a\") " pod="openshift-marketplace/redhat-operators-x68z7" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.897767 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.906128 5039 patch_prober.go:28] interesting pod/router-default-5444994796-4hh9n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 13:20:42 crc kubenswrapper[5039]: [-]has-synced failed: reason withheld Nov 24 13:20:42 crc kubenswrapper[5039]: [+]process-running ok Nov 24 13:20:42 crc kubenswrapper[5039]: healthz check failed Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.906194 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4hh9n" podUID="25637e2c-a1e3-4449-a549-7b081d0c4c4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.938265 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.962915 5039 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
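The sequence from 13:20:42.552 to 13:20:42.962 is the handshake that ends the retry storm: the plugin watcher picks up /var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock, RegisterPlugin validates the driver against CSI version 1.0.0 and adds it to the registered list, and the very next MountDevice attempt goes through. Note the capability gate visible in the csi_attacher.go line: because the hostpath provisioner does not advertise STAGE_UNSTAGE_VOLUME, the attacher skips NodeStageVolume entirely and proceeds straight to the per-pod SetUp. A sketch of that gate (simplified and with my own type names; the real kubelet queries the driver's node service over gRPC):

    package main

    import "fmt"

    type nodeCapability string

    const stageUnstageVolume nodeCapability = "STAGE_UNSTAGE_VOLUME"

    // nodeClient stands in for the driver's NodeGetCapabilities response.
    type nodeClient struct{ capabilities map[nodeCapability]bool }

    func (c *nodeClient) mountDevice(volumeID, stagingPath string) error {
    	if !c.capabilities[stageUnstageVolume] {
    		// Nothing to stage; SetUp will publish the volume directly.
    		fmt.Println("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...")
    		return nil
    	}
    	fmt.Println("NodeStageVolume", volumeID, "->", stagingPath)
    	return nil
    }

    func main() {
    	// The hostpath provisioner advertises no staging capability.
    	hostpath := &nodeClient{capabilities: map[nodeCapability]bool{}}
    	_ = hostpath.mountDevice("pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8",
    		"/var/lib/kubelet/plugins/kubernetes.io/csi/.../globalmount")
    }

Immediately after this gate, the log records MountVolume.MountDevice succeeded with the global mount path, followed by MountVolume.SetUp for the image-registry pod.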
Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.962965 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.978937 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jbzck"] Nov 24 13:20:42 crc kubenswrapper[5039]: I1124 13:20:42.979933 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jbzck" Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.003183 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-x68z7" Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.006914 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jbzck"] Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.039454 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82a55c8b-6639-49c5-b57a-99ed016d7e7c-utilities\") pod \"redhat-operators-jbzck\" (UID: \"82a55c8b-6639-49c5-b57a-99ed016d7e7c\") " pod="openshift-marketplace/redhat-operators-jbzck" Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.039538 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82a55c8b-6639-49c5-b57a-99ed016d7e7c-catalog-content\") pod \"redhat-operators-jbzck\" (UID: \"82a55c8b-6639-49c5-b57a-99ed016d7e7c\") " pod="openshift-marketplace/redhat-operators-jbzck" Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.039577 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qz5v7\" (UniqueName: \"kubernetes.io/projected/82a55c8b-6639-49c5-b57a-99ed016d7e7c-kube-api-access-qz5v7\") pod \"redhat-operators-jbzck\" (UID: \"82a55c8b-6639-49c5-b57a-99ed016d7e7c\") " pod="openshift-marketplace/redhat-operators-jbzck" Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.070466 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2swfx\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.133577 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.145168 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82a55c8b-6639-49c5-b57a-99ed016d7e7c-utilities\") pod \"redhat-operators-jbzck\" (UID: \"82a55c8b-6639-49c5-b57a-99ed016d7e7c\") " 
pod="openshift-marketplace/redhat-operators-jbzck" Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.145245 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82a55c8b-6639-49c5-b57a-99ed016d7e7c-catalog-content\") pod \"redhat-operators-jbzck\" (UID: \"82a55c8b-6639-49c5-b57a-99ed016d7e7c\") " pod="openshift-marketplace/redhat-operators-jbzck" Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.145284 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qz5v7\" (UniqueName: \"kubernetes.io/projected/82a55c8b-6639-49c5-b57a-99ed016d7e7c-kube-api-access-qz5v7\") pod \"redhat-operators-jbzck\" (UID: \"82a55c8b-6639-49c5-b57a-99ed016d7e7c\") " pod="openshift-marketplace/redhat-operators-jbzck" Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.145632 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82a55c8b-6639-49c5-b57a-99ed016d7e7c-utilities\") pod \"redhat-operators-jbzck\" (UID: \"82a55c8b-6639-49c5-b57a-99ed016d7e7c\") " pod="openshift-marketplace/redhat-operators-jbzck" Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.145991 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82a55c8b-6639-49c5-b57a-99ed016d7e7c-catalog-content\") pod \"redhat-operators-jbzck\" (UID: \"82a55c8b-6639-49c5-b57a-99ed016d7e7c\") " pod="openshift-marketplace/redhat-operators-jbzck" Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.170781 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qz5v7\" (UniqueName: \"kubernetes.io/projected/82a55c8b-6639-49c5-b57a-99ed016d7e7c-kube-api-access-qz5v7\") pod \"redhat-operators-jbzck\" (UID: \"82a55c8b-6639-49c5-b57a-99ed016d7e7c\") " pod="openshift-marketplace/redhat-operators-jbzck" Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.215996 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.250109 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-x68z7"] Nov 24 13:20:43 crc kubenswrapper[5039]: W1124 13:20:43.294133 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaf4ec023_8129_4c1b_99c4_20e814084d4a.slice/crio-1da2333063fd57ae02561ff6a88323d25bc97078ec87725424d56ee848fdb9d3 WatchSource:0}: Error finding container 1da2333063fd57ae02561ff6a88323d25bc97078ec87725424d56ee848fdb9d3: Status 404 returned error can't find the container with id 1da2333063fd57ae02561ff6a88323d25bc97078ec87725424d56ee848fdb9d3 Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.303826 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jbzck" Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.346136 5039 generic.go:334] "Generic (PLEG): container finished" podID="6170f687-30e2-44b0-860e-ddcee4e4f2d4" containerID="8fea76e0ae8d5965ad8f64d9623d6e39bda662baa57206f38c5b9d0e603594c1" exitCode=0 Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.346222 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54" event={"ID":"6170f687-30e2-44b0-860e-ddcee4e4f2d4","Type":"ContainerDied","Data":"8fea76e0ae8d5965ad8f64d9623d6e39bda662baa57206f38c5b9d0e603594c1"} Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.351202 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"b572c340-7119-4771-82b7-44f475fd9c82","Type":"ContainerStarted","Data":"185805b1b85ec684591c5c64c206aeefce2034a89f1777c39e8a57a441d8f511"} Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.362674 5039 generic.go:334] "Generic (PLEG): container finished" podID="6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0" containerID="ceac1b433e91998ccdc6abbf9823554887915bc49c970fbc896a9fee01460390" exitCode=0 Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.362794 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gj66q" event={"ID":"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0","Type":"ContainerDied","Data":"ceac1b433e91998ccdc6abbf9823554887915bc49c970fbc896a9fee01460390"} Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.362841 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gj66q" event={"ID":"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0","Type":"ContainerStarted","Data":"095dd7e2d7557ac06e0eb209042efb0a172806c4a8ad1af7bd78fbcab335a91b"} Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.389349 5039 generic.go:334] "Generic (PLEG): container finished" podID="69a0e706-e6ab-4dee-bffa-5dd23c530205" containerID="c8314137cd3a0eaeba29ba9c9a25c1cdda035e35887b37605355b4d391965277" exitCode=0 Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.389644 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"69a0e706-e6ab-4dee-bffa-5dd23c530205","Type":"ContainerDied","Data":"c8314137cd3a0eaeba29ba9c9a25c1cdda035e35887b37605355b4d391965277"} Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.394853 5039 generic.go:334] "Generic (PLEG): container finished" podID="37739944-4511-4fe7-95df-09d42974532e" containerID="8c03ec056c99f14b798bdf05f4ff01957a19483b65452e3473afc100f15fe80f" exitCode=0 Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.396989 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cd9cq" event={"ID":"37739944-4511-4fe7-95df-09d42974532e","Type":"ContainerDied","Data":"8c03ec056c99f14b798bdf05f4ff01957a19483b65452e3473afc100f15fe80f"} Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.431069 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" event={"ID":"21cbf909-e064-465c-bb64-5b0d5c82d691","Type":"ContainerStarted","Data":"5404987dffef829af117ecb020b98fea2876e682202642dc17e0d3e9d297b69e"} Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.441105 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x68z7" 
event={"ID":"af4ec023-8129-4c1b-99c4-20e814084d4a","Type":"ContainerStarted","Data":"1da2333063fd57ae02561ff6a88323d25bc97078ec87725424d56ee848fdb9d3"} Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.505130 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2swfx"] Nov 24 13:20:43 crc kubenswrapper[5039]: W1124 13:20:43.647348 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod750f36ae_2e78_4a6d_8e78_e315d507d436.slice/crio-ecb8280c7619e91ed039ceaa623b2113a26220346cf2e2777c9212bf8c2549df WatchSource:0}: Error finding container ecb8280c7619e91ed039ceaa623b2113a26220346cf2e2777c9212bf8c2549df: Status 404 returned error can't find the container with id ecb8280c7619e91ed039ceaa623b2113a26220346cf2e2777c9212bf8c2549df Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.690076 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jbzck"] Nov 24 13:20:43 crc kubenswrapper[5039]: W1124 13:20:43.695616 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod82a55c8b_6639_49c5_b57a_99ed016d7e7c.slice/crio-096143f81f16ec88dcfa8333133915a216f24cd691fe4e265a8a535cb3a61d39 WatchSource:0}: Error finding container 096143f81f16ec88dcfa8333133915a216f24cd691fe4e265a8a535cb3a61d39: Status 404 returned error can't find the container with id 096143f81f16ec88dcfa8333133915a216f24cd691fe4e265a8a535cb3a61d39 Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.906167 5039 patch_prober.go:28] interesting pod/router-default-5444994796-4hh9n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 13:20:43 crc kubenswrapper[5039]: [-]has-synced failed: reason withheld Nov 24 13:20:43 crc kubenswrapper[5039]: [+]process-running ok Nov 24 13:20:43 crc kubenswrapper[5039]: healthz check failed Nov 24 13:20:43 crc kubenswrapper[5039]: I1124 13:20:43.906228 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4hh9n" podUID="25637e2c-a1e3-4449-a549-7b081d0c4c4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.315942 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.451260 5039 generic.go:334] "Generic (PLEG): container finished" podID="b572c340-7119-4771-82b7-44f475fd9c82" containerID="e13733df5d250bb7455424aa2db06134271330147d63c1fe82e2947adb9f87ce" exitCode=0 Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.451329 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"b572c340-7119-4771-82b7-44f475fd9c82","Type":"ContainerDied","Data":"e13733df5d250bb7455424aa2db06134271330147d63c1fe82e2947adb9f87ce"} Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.454460 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" 
event={"ID":"750f36ae-2e78-4a6d-8e78-e315d507d436","Type":"ContainerStarted","Data":"50f2be6ae811de5262a20e8dc6d240c2d70084bc8cd64e3ac2238a4e04728f44"} Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.454487 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" event={"ID":"750f36ae-2e78-4a6d-8e78-e315d507d436","Type":"ContainerStarted","Data":"ecb8280c7619e91ed039ceaa623b2113a26220346cf2e2777c9212bf8c2549df"} Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.456676 5039 generic.go:334] "Generic (PLEG): container finished" podID="82a55c8b-6639-49c5-b57a-99ed016d7e7c" containerID="5046eda285cc8c6c9d033d0e9fe03d98ce8b3e2b30f9a83fb7d2ded0df0da43b" exitCode=0 Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.456743 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jbzck" event={"ID":"82a55c8b-6639-49c5-b57a-99ed016d7e7c","Type":"ContainerDied","Data":"5046eda285cc8c6c9d033d0e9fe03d98ce8b3e2b30f9a83fb7d2ded0df0da43b"} Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.456762 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jbzck" event={"ID":"82a55c8b-6639-49c5-b57a-99ed016d7e7c","Type":"ContainerStarted","Data":"096143f81f16ec88dcfa8333133915a216f24cd691fe4e265a8a535cb3a61d39"} Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.466551 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" event={"ID":"21cbf909-e064-465c-bb64-5b0d5c82d691","Type":"ContainerStarted","Data":"c8eb46cd94bbbe950e0274711f5892cb9120d9c8ea63bad3073b1ee34aff72dd"} Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.471294 5039 generic.go:334] "Generic (PLEG): container finished" podID="af4ec023-8129-4c1b-99c4-20e814084d4a" containerID="92538f189455c595c39ec33d229091377ae1ac5dad24d5d31bdd208769008513" exitCode=0 Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.471365 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x68z7" event={"ID":"af4ec023-8129-4c1b-99c4-20e814084d4a","Type":"ContainerDied","Data":"92538f189455c595c39ec33d229091377ae1ac5dad24d5d31bdd208769008513"} Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.484633 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" podStartSLOduration=131.484614217 podStartE2EDuration="2m11.484614217s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:44.484482303 +0000 UTC m=+156.923606823" watchObservedRunningTime="2025-11-24 13:20:44.484614217 +0000 UTC m=+156.923738717" Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.515630 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-pcqbh" podStartSLOduration=16.515465133 podStartE2EDuration="16.515465133s" podCreationTimestamp="2025-11-24 13:20:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:20:44.514281152 +0000 UTC m=+156.953405652" watchObservedRunningTime="2025-11-24 13:20:44.515465133 +0000 UTC m=+156.954589633" Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.761600 5039 util.go:48] "No ready sandbox for pod can 
be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54" Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.812723 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.899352 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fm2tl\" (UniqueName: \"kubernetes.io/projected/6170f687-30e2-44b0-860e-ddcee4e4f2d4-kube-api-access-fm2tl\") pod \"6170f687-30e2-44b0-860e-ddcee4e4f2d4\" (UID: \"6170f687-30e2-44b0-860e-ddcee4e4f2d4\") " Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.899393 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/69a0e706-e6ab-4dee-bffa-5dd23c530205-kubelet-dir\") pod \"69a0e706-e6ab-4dee-bffa-5dd23c530205\" (UID: \"69a0e706-e6ab-4dee-bffa-5dd23c530205\") " Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.899438 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/69a0e706-e6ab-4dee-bffa-5dd23c530205-kube-api-access\") pod \"69a0e706-e6ab-4dee-bffa-5dd23c530205\" (UID: \"69a0e706-e6ab-4dee-bffa-5dd23c530205\") " Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.899471 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6170f687-30e2-44b0-860e-ddcee4e4f2d4-secret-volume\") pod \"6170f687-30e2-44b0-860e-ddcee4e4f2d4\" (UID: \"6170f687-30e2-44b0-860e-ddcee4e4f2d4\") " Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.899532 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6170f687-30e2-44b0-860e-ddcee4e4f2d4-config-volume\") pod \"6170f687-30e2-44b0-860e-ddcee4e4f2d4\" (UID: \"6170f687-30e2-44b0-860e-ddcee4e4f2d4\") " Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.900444 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69a0e706-e6ab-4dee-bffa-5dd23c530205-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "69a0e706-e6ab-4dee-bffa-5dd23c530205" (UID: "69a0e706-e6ab-4dee-bffa-5dd23c530205"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.900811 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6170f687-30e2-44b0-860e-ddcee4e4f2d4-config-volume" (OuterVolumeSpecName: "config-volume") pod "6170f687-30e2-44b0-860e-ddcee4e4f2d4" (UID: "6170f687-30e2-44b0-860e-ddcee4e4f2d4"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.906808 5039 patch_prober.go:28] interesting pod/router-default-5444994796-4hh9n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 13:20:44 crc kubenswrapper[5039]: [-]has-synced failed: reason withheld Nov 24 13:20:44 crc kubenswrapper[5039]: [+]process-running ok Nov 24 13:20:44 crc kubenswrapper[5039]: healthz check failed Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.907112 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4hh9n" podUID="25637e2c-a1e3-4449-a549-7b081d0c4c4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.907168 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69a0e706-e6ab-4dee-bffa-5dd23c530205-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "69a0e706-e6ab-4dee-bffa-5dd23c530205" (UID: "69a0e706-e6ab-4dee-bffa-5dd23c530205"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.907544 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6170f687-30e2-44b0-860e-ddcee4e4f2d4-kube-api-access-fm2tl" (OuterVolumeSpecName: "kube-api-access-fm2tl") pod "6170f687-30e2-44b0-860e-ddcee4e4f2d4" (UID: "6170f687-30e2-44b0-860e-ddcee4e4f2d4"). InnerVolumeSpecName "kube-api-access-fm2tl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:20:44 crc kubenswrapper[5039]: I1124 13:20:44.907871 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6170f687-30e2-44b0-860e-ddcee4e4f2d4-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "6170f687-30e2-44b0-860e-ddcee4e4f2d4" (UID: "6170f687-30e2-44b0-860e-ddcee4e4f2d4"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.003083 5039 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6170f687-30e2-44b0-860e-ddcee4e4f2d4-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.003114 5039 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6170f687-30e2-44b0-860e-ddcee4e4f2d4-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.003125 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fm2tl\" (UniqueName: \"kubernetes.io/projected/6170f687-30e2-44b0-860e-ddcee4e4f2d4-kube-api-access-fm2tl\") on node \"crc\" DevicePath \"\"" Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.003135 5039 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/69a0e706-e6ab-4dee-bffa-5dd23c530205-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.003143 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/69a0e706-e6ab-4dee-bffa-5dd23c530205-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.400053 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.404752 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-9jvgw" Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.483336 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54" event={"ID":"6170f687-30e2-44b0-860e-ddcee4e4f2d4","Type":"ContainerDied","Data":"7a5d6fbd3434ce2d121edcce9e34e34e10173fcbb1ec851212ffd2a050f02810"} Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.483374 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7a5d6fbd3434ce2d121edcce9e34e34e10173fcbb1ec851212ffd2a050f02810" Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.483442 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54" Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.500543 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"69a0e706-e6ab-4dee-bffa-5dd23c530205","Type":"ContainerDied","Data":"d0068b8e54890651f99e46b96d1525dee75e76b6f7617121ba5a929ba110f84f"} Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.500599 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d0068b8e54890651f99e46b96d1525dee75e76b6f7617121ba5a929ba110f84f" Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.501976 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.502031 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.776000 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.906559 5039 patch_prober.go:28] interesting pod/router-default-5444994796-4hh9n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 13:20:45 crc kubenswrapper[5039]: [-]has-synced failed: reason withheld Nov 24 13:20:45 crc kubenswrapper[5039]: [+]process-running ok Nov 24 13:20:45 crc kubenswrapper[5039]: healthz check failed Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.906689 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4hh9n" podUID="25637e2c-a1e3-4449-a549-7b081d0c4c4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.923986 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b572c340-7119-4771-82b7-44f475fd9c82-kube-api-access\") pod \"b572c340-7119-4771-82b7-44f475fd9c82\" (UID: \"b572c340-7119-4771-82b7-44f475fd9c82\") " Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.924180 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b572c340-7119-4771-82b7-44f475fd9c82-kubelet-dir\") pod \"b572c340-7119-4771-82b7-44f475fd9c82\" (UID: \"b572c340-7119-4771-82b7-44f475fd9c82\") " Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.924471 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b572c340-7119-4771-82b7-44f475fd9c82-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "b572c340-7119-4771-82b7-44f475fd9c82" (UID: "b572c340-7119-4771-82b7-44f475fd9c82"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:20:45 crc kubenswrapper[5039]: I1124 13:20:45.959730 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b572c340-7119-4771-82b7-44f475fd9c82-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "b572c340-7119-4771-82b7-44f475fd9c82" (UID: "b572c340-7119-4771-82b7-44f475fd9c82"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:20:46 crc kubenswrapper[5039]: I1124 13:20:46.026276 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b572c340-7119-4771-82b7-44f475fd9c82-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 13:20:46 crc kubenswrapper[5039]: I1124 13:20:46.026321 5039 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b572c340-7119-4771-82b7-44f475fd9c82-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 24 13:20:46 crc kubenswrapper[5039]: I1124 13:20:46.512104 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"b572c340-7119-4771-82b7-44f475fd9c82","Type":"ContainerDied","Data":"185805b1b85ec684591c5c64c206aeefce2034a89f1777c39e8a57a441d8f511"} Nov 24 13:20:46 crc kubenswrapper[5039]: I1124 13:20:46.512137 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 13:20:46 crc kubenswrapper[5039]: I1124 13:20:46.512160 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="185805b1b85ec684591c5c64c206aeefce2034a89f1777c39e8a57a441d8f511" Nov 24 13:20:46 crc kubenswrapper[5039]: I1124 13:20:46.662496 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-29j84" Nov 24 13:20:46 crc kubenswrapper[5039]: I1124 13:20:46.905627 5039 patch_prober.go:28] interesting pod/router-default-5444994796-4hh9n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 13:20:46 crc kubenswrapper[5039]: [-]has-synced failed: reason withheld Nov 24 13:20:46 crc kubenswrapper[5039]: [+]process-running ok Nov 24 13:20:46 crc kubenswrapper[5039]: healthz check failed Nov 24 13:20:46 crc kubenswrapper[5039]: I1124 13:20:46.905989 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4hh9n" podUID="25637e2c-a1e3-4449-a549-7b081d0c4c4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 13:20:47 crc kubenswrapper[5039]: I1124 13:20:47.905731 5039 patch_prober.go:28] interesting pod/router-default-5444994796-4hh9n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 13:20:47 crc kubenswrapper[5039]: [-]has-synced failed: reason withheld Nov 24 13:20:47 crc kubenswrapper[5039]: [+]process-running ok Nov 24 13:20:47 crc kubenswrapper[5039]: healthz check failed Nov 24 13:20:47 crc kubenswrapper[5039]: I1124 13:20:47.905800 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4hh9n" podUID="25637e2c-a1e3-4449-a549-7b081d0c4c4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 13:20:48 crc kubenswrapper[5039]: I1124 13:20:48.909478 5039 patch_prober.go:28] interesting pod/router-default-5444994796-4hh9n container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 13:20:48 crc kubenswrapper[5039]: [-]has-synced failed: reason withheld Nov 
24 13:20:48 crc kubenswrapper[5039]: [+]process-running ok Nov 24 13:20:48 crc kubenswrapper[5039]: healthz check failed Nov 24 13:20:48 crc kubenswrapper[5039]: I1124 13:20:48.909577 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4hh9n" podUID="25637e2c-a1e3-4449-a549-7b081d0c4c4c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 13:20:49 crc kubenswrapper[5039]: I1124 13:20:49.906175 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-4hh9n" Nov 24 13:20:49 crc kubenswrapper[5039]: I1124 13:20:49.909318 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-4hh9n" Nov 24 13:20:50 crc kubenswrapper[5039]: I1124 13:20:50.101932 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:20:50 crc kubenswrapper[5039]: I1124 13:20:50.101996 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:20:50 crc kubenswrapper[5039]: I1124 13:20:50.119617 5039 patch_prober.go:28] interesting pod/downloads-7954f5f757-98dp9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 24 13:20:50 crc kubenswrapper[5039]: I1124 13:20:50.119681 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-98dp9" podUID="c7032d1d-5aae-4e50-b10f-3df40a0cd983" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 24 13:20:50 crc kubenswrapper[5039]: I1124 13:20:50.119735 5039 patch_prober.go:28] interesting pod/downloads-7954f5f757-98dp9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 24 13:20:50 crc kubenswrapper[5039]: I1124 13:20:50.119791 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-98dp9" podUID="c7032d1d-5aae-4e50-b10f-3df40a0cd983" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 24 13:20:50 crc kubenswrapper[5039]: I1124 13:20:50.593332 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:20:51 crc kubenswrapper[5039]: I1124 13:20:51.401256 5039 patch_prober.go:28] interesting pod/console-f9d7485db-zqgfl container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.22:8443/health\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Nov 24 13:20:51 crc kubenswrapper[5039]: I1124 13:20:51.401308 5039 
prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-zqgfl" podUID="663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2" containerName="console" probeResult="failure" output="Get \"https://10.217.0.22:8443/health\": dial tcp 10.217.0.22:8443: connect: connection refused" Nov 24 13:20:56 crc kubenswrapper[5039]: I1124 13:20:56.098668 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs\") pod \"network-metrics-daemon-vnpwt\" (UID: \"5926107d-81bc-4e34-9e27-8018cbccf590\") " pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:20:56 crc kubenswrapper[5039]: I1124 13:20:56.117294 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5926107d-81bc-4e34-9e27-8018cbccf590-metrics-certs\") pod \"network-metrics-daemon-vnpwt\" (UID: \"5926107d-81bc-4e34-9e27-8018cbccf590\") " pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:20:56 crc kubenswrapper[5039]: I1124 13:20:56.232177 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vnpwt" Nov 24 13:21:00 crc kubenswrapper[5039]: I1124 13:21:00.119581 5039 patch_prober.go:28] interesting pod/downloads-7954f5f757-98dp9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 24 13:21:00 crc kubenswrapper[5039]: I1124 13:21:00.119696 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-98dp9" podUID="c7032d1d-5aae-4e50-b10f-3df40a0cd983" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 24 13:21:00 crc kubenswrapper[5039]: I1124 13:21:00.119882 5039 patch_prober.go:28] interesting pod/downloads-7954f5f757-98dp9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 24 13:21:00 crc kubenswrapper[5039]: I1124 13:21:00.119988 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-98dp9" podUID="c7032d1d-5aae-4e50-b10f-3df40a0cd983" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 24 13:21:00 crc kubenswrapper[5039]: I1124 13:21:00.120016 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-98dp9" Nov 24 13:21:00 crc kubenswrapper[5039]: I1124 13:21:00.122761 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"be4e741e1ccc5c5d4353f637a3d458b95d1c4f92e5dfcc97d8c7a239004f5bf9"} pod="openshift-console/downloads-7954f5f757-98dp9" containerMessage="Container download-server failed liveness probe, will be restarted" Nov 24 13:21:00 crc kubenswrapper[5039]: I1124 13:21:00.122987 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-98dp9" podUID="c7032d1d-5aae-4e50-b10f-3df40a0cd983" containerName="download-server" 
containerID="cri-o://be4e741e1ccc5c5d4353f637a3d458b95d1c4f92e5dfcc97d8c7a239004f5bf9" gracePeriod=2 Nov 24 13:21:00 crc kubenswrapper[5039]: I1124 13:21:00.123673 5039 patch_prober.go:28] interesting pod/downloads-7954f5f757-98dp9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 24 13:21:00 crc kubenswrapper[5039]: I1124 13:21:00.123747 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-98dp9" podUID="c7032d1d-5aae-4e50-b10f-3df40a0cd983" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 24 13:21:00 crc kubenswrapper[5039]: I1124 13:21:00.606075 5039 generic.go:334] "Generic (PLEG): container finished" podID="c7032d1d-5aae-4e50-b10f-3df40a0cd983" containerID="be4e741e1ccc5c5d4353f637a3d458b95d1c4f92e5dfcc97d8c7a239004f5bf9" exitCode=0 Nov 24 13:21:00 crc kubenswrapper[5039]: I1124 13:21:00.606124 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-98dp9" event={"ID":"c7032d1d-5aae-4e50-b10f-3df40a0cd983","Type":"ContainerDied","Data":"be4e741e1ccc5c5d4353f637a3d458b95d1c4f92e5dfcc97d8c7a239004f5bf9"} Nov 24 13:21:01 crc kubenswrapper[5039]: I1124 13:21:01.409383 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:21:01 crc kubenswrapper[5039]: I1124 13:21:01.416023 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-zqgfl" Nov 24 13:21:02 crc kubenswrapper[5039]: E1124 13:21:02.700302 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \"/var/tmp/container_images_storage3561374662/1\": happened during read: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 24 13:21:02 crc kubenswrapper[5039]: E1124 13:21:02.700676 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kr676,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-gj66q_openshift-marketplace(6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \"/var/tmp/container_images_storage3561374662/1\": happened during read: context canceled" logger="UnhandledError" Nov 24 13:21:02 crc kubenswrapper[5039]: E1124 13:21:02.702021 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \\\"/var/tmp/container_images_storage3561374662/1\\\": happened during read: context canceled\"" pod="openshift-marketplace/redhat-marketplace-gj66q" podUID="6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0" Nov 24 13:21:03 crc kubenswrapper[5039]: I1124 13:21:03.222711 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:21:05 crc kubenswrapper[5039]: E1124 13:21:05.662138 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-gj66q" podUID="6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0" Nov 24 13:21:07 crc kubenswrapper[5039]: E1124 13:21:07.751304 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 24 13:21:07 crc kubenswrapper[5039]: E1124 13:21:07.751760 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gz8gz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-hvqj8_openshift-marketplace(8e77b6e7-8d78-4d0d-ab33-c27c4d168b74): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 24 13:21:07 crc kubenswrapper[5039]: E1124 13:21:07.753243 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-hvqj8" podUID="8e77b6e7-8d78-4d0d-ab33-c27c4d168b74" Nov 24 13:21:08 crc kubenswrapper[5039]: E1124 13:21:08.598655 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 24 13:21:08 crc kubenswrapper[5039]: E1124 13:21:08.598867 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4ph79,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-76mnr_openshift-marketplace(e7514bf7-ccdc-42f0-a159-78d12f91e55c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 24 13:21:08 crc kubenswrapper[5039]: E1124 13:21:08.600457 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-76mnr" podUID="e7514bf7-ccdc-42f0-a159-78d12f91e55c" Nov 24 13:21:10 crc kubenswrapper[5039]: I1124 13:21:10.119413 5039 patch_prober.go:28] interesting pod/downloads-7954f5f757-98dp9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 24 13:21:10 crc kubenswrapper[5039]: I1124 13:21:10.119799 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-98dp9" podUID="c7032d1d-5aae-4e50-b10f-3df40a0cd983" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 24 13:21:10 crc kubenswrapper[5039]: E1124 13:21:10.638479 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-76mnr" podUID="e7514bf7-ccdc-42f0-a159-78d12f91e55c" Nov 24 13:21:10 crc kubenswrapper[5039]: E1124 13:21:10.642115 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-hvqj8" podUID="8e77b6e7-8d78-4d0d-ab33-c27c4d168b74" Nov 24 13:21:10 crc kubenswrapper[5039]: E1124 13:21:10.834809 5039 log.go:32] "PullImage from 
image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 24 13:21:10 crc kubenswrapper[5039]: E1124 13:21:10.835038 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x72vn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-x68z7_openshift-marketplace(af4ec023-8129-4c1b-99c4-20e814084d4a): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 24 13:21:10 crc kubenswrapper[5039]: E1124 13:21:10.836228 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-x68z7" podUID="af4ec023-8129-4c1b-99c4-20e814084d4a" Nov 24 13:21:10 crc kubenswrapper[5039]: I1124 13:21:10.945705 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-vnpwt"] Nov 24 13:21:11 crc kubenswrapper[5039]: I1124 13:21:11.359920 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ndmnz" Nov 24 13:21:11 crc kubenswrapper[5039]: I1124 13:21:11.661969 5039 generic.go:334] "Generic (PLEG): container finished" podID="aab10f23-6223-4554-9a20-3669e7e0eb72" containerID="68dbc4b770763d8ce8502f42f569d21bea94bc2df45550b16bf0ea2080c0c0ee" exitCode=0 Nov 24 13:21:11 crc kubenswrapper[5039]: I1124 13:21:11.662035 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9979w" event={"ID":"aab10f23-6223-4554-9a20-3669e7e0eb72","Type":"ContainerDied","Data":"68dbc4b770763d8ce8502f42f569d21bea94bc2df45550b16bf0ea2080c0c0ee"} Nov 24 13:21:11 crc kubenswrapper[5039]: I1124 13:21:11.667889 
5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-vnpwt" event={"ID":"5926107d-81bc-4e34-9e27-8018cbccf590","Type":"ContainerStarted","Data":"309ec2c3bdd8675bd894e0ae9c76430300e54fff9609cf8a34099e4ba0228807"} Nov 24 13:21:11 crc kubenswrapper[5039]: I1124 13:21:11.667939 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-vnpwt" event={"ID":"5926107d-81bc-4e34-9e27-8018cbccf590","Type":"ContainerStarted","Data":"128f0eb5dbb07f57cd2d8db7889b4bde5bb0ca7b22049e129948a7721deefe64"} Nov 24 13:21:11 crc kubenswrapper[5039]: I1124 13:21:11.671781 5039 generic.go:334] "Generic (PLEG): container finished" podID="37739944-4511-4fe7-95df-09d42974532e" containerID="5302180f020c0c586237a2ca41a425bbeb6c1dc998f292bd18c0f11041fd6cdc" exitCode=0 Nov 24 13:21:11 crc kubenswrapper[5039]: I1124 13:21:11.671839 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cd9cq" event={"ID":"37739944-4511-4fe7-95df-09d42974532e","Type":"ContainerDied","Data":"5302180f020c0c586237a2ca41a425bbeb6c1dc998f292bd18c0f11041fd6cdc"} Nov 24 13:21:11 crc kubenswrapper[5039]: I1124 13:21:11.677046 5039 generic.go:334] "Generic (PLEG): container finished" podID="30fffdbb-286b-47ad-887b-fd8ec67725d6" containerID="e34b8c34954a1e18a0b9c55fbcd0e3750c5bcdb537fc1c1e7178af4b594f2fd5" exitCode=0 Nov 24 13:21:11 crc kubenswrapper[5039]: I1124 13:21:11.677118 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdpn4" event={"ID":"30fffdbb-286b-47ad-887b-fd8ec67725d6","Type":"ContainerDied","Data":"e34b8c34954a1e18a0b9c55fbcd0e3750c5bcdb537fc1c1e7178af4b594f2fd5"} Nov 24 13:21:11 crc kubenswrapper[5039]: I1124 13:21:11.683358 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jbzck" event={"ID":"82a55c8b-6639-49c5-b57a-99ed016d7e7c","Type":"ContainerStarted","Data":"2c94f9c81127b91e752997fa338a8ee13650853a4c28d36d4ecbaeb5c029339b"} Nov 24 13:21:11 crc kubenswrapper[5039]: I1124 13:21:11.694081 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-98dp9" event={"ID":"c7032d1d-5aae-4e50-b10f-3df40a0cd983","Type":"ContainerStarted","Data":"1f1d323ccef634a93e5416ac4e78fe9c2547c5ac1d437461bb4623a007ba3400"} Nov 24 13:21:11 crc kubenswrapper[5039]: I1124 13:21:11.694353 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-98dp9" Nov 24 13:21:11 crc kubenswrapper[5039]: I1124 13:21:11.694486 5039 patch_prober.go:28] interesting pod/downloads-7954f5f757-98dp9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 24 13:21:11 crc kubenswrapper[5039]: I1124 13:21:11.694632 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-98dp9" podUID="c7032d1d-5aae-4e50-b10f-3df40a0cd983" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 24 13:21:11 crc kubenswrapper[5039]: E1124 13:21:11.698777 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" 
pod="openshift-marketplace/redhat-operators-x68z7" podUID="af4ec023-8129-4c1b-99c4-20e814084d4a" Nov 24 13:21:12 crc kubenswrapper[5039]: I1124 13:21:12.710440 5039 generic.go:334] "Generic (PLEG): container finished" podID="82a55c8b-6639-49c5-b57a-99ed016d7e7c" containerID="2c94f9c81127b91e752997fa338a8ee13650853a4c28d36d4ecbaeb5c029339b" exitCode=0 Nov 24 13:21:12 crc kubenswrapper[5039]: I1124 13:21:12.711089 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jbzck" event={"ID":"82a55c8b-6639-49c5-b57a-99ed016d7e7c","Type":"ContainerDied","Data":"2c94f9c81127b91e752997fa338a8ee13650853a4c28d36d4ecbaeb5c029339b"} Nov 24 13:21:12 crc kubenswrapper[5039]: I1124 13:21:12.717115 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-vnpwt" event={"ID":"5926107d-81bc-4e34-9e27-8018cbccf590","Type":"ContainerStarted","Data":"71f40b491118a19bc6a9d9eda63b9d66bf0baadafdf6f2f7ee8111350db672ba"} Nov 24 13:21:12 crc kubenswrapper[5039]: I1124 13:21:12.718470 5039 patch_prober.go:28] interesting pod/downloads-7954f5f757-98dp9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 24 13:21:12 crc kubenswrapper[5039]: I1124 13:21:12.718548 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-98dp9" podUID="c7032d1d-5aae-4e50-b10f-3df40a0cd983" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 24 13:21:12 crc kubenswrapper[5039]: I1124 13:21:12.749793 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-vnpwt" podStartSLOduration=159.74976566 podStartE2EDuration="2m39.74976566s" podCreationTimestamp="2025-11-24 13:18:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:21:12.749084863 +0000 UTC m=+185.188209363" watchObservedRunningTime="2025-11-24 13:21:12.74976566 +0000 UTC m=+185.188890180" Nov 24 13:21:13 crc kubenswrapper[5039]: I1124 13:21:13.725817 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9979w" event={"ID":"aab10f23-6223-4554-9a20-3669e7e0eb72","Type":"ContainerStarted","Data":"8be8aaadba9972bbaa2b66e15014a9539cef6ccc900d4df7e4f978e223263e18"} Nov 24 13:21:13 crc kubenswrapper[5039]: I1124 13:21:13.728972 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cd9cq" event={"ID":"37739944-4511-4fe7-95df-09d42974532e","Type":"ContainerStarted","Data":"c81b97a4494d2c4b7570562579a70ede5220900d52bbcfcfe993736d2baf701f"} Nov 24 13:21:13 crc kubenswrapper[5039]: I1124 13:21:13.733725 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdpn4" event={"ID":"30fffdbb-286b-47ad-887b-fd8ec67725d6","Type":"ContainerStarted","Data":"6bfb6c03941b7d648256b36f62b2a3db978c5b44b5e117a1383077cfcbc3bd29"} Nov 24 13:21:13 crc kubenswrapper[5039]: I1124 13:21:13.749430 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-9979w" podStartSLOduration=3.47473468 podStartE2EDuration="34.749405662s" podCreationTimestamp="2025-11-24 13:20:39 +0000 UTC" 
firstStartedPulling="2025-11-24 13:20:41.26046189 +0000 UTC m=+153.699586390" lastFinishedPulling="2025-11-24 13:21:12.535132872 +0000 UTC m=+184.974257372" observedRunningTime="2025-11-24 13:21:13.744131906 +0000 UTC m=+186.183256436" watchObservedRunningTime="2025-11-24 13:21:13.749405662 +0000 UTC m=+186.188530172" Nov 24 13:21:13 crc kubenswrapper[5039]: I1124 13:21:13.769157 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-cd9cq" podStartSLOduration=3.689873482 podStartE2EDuration="32.769135911s" podCreationTimestamp="2025-11-24 13:20:41 +0000 UTC" firstStartedPulling="2025-11-24 13:20:43.40263107 +0000 UTC m=+155.841755570" lastFinishedPulling="2025-11-24 13:21:12.481893499 +0000 UTC m=+184.921017999" observedRunningTime="2025-11-24 13:21:13.768562526 +0000 UTC m=+186.207687026" watchObservedRunningTime="2025-11-24 13:21:13.769135911 +0000 UTC m=+186.208260411" Nov 24 13:21:13 crc kubenswrapper[5039]: I1124 13:21:13.790479 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tdpn4" podStartSLOduration=3.410062442 podStartE2EDuration="34.790460642s" podCreationTimestamp="2025-11-24 13:20:39 +0000 UTC" firstStartedPulling="2025-11-24 13:20:41.207873673 +0000 UTC m=+153.646998173" lastFinishedPulling="2025-11-24 13:21:12.588271863 +0000 UTC m=+185.027396373" observedRunningTime="2025-11-24 13:21:13.787864595 +0000 UTC m=+186.226989095" watchObservedRunningTime="2025-11-24 13:21:13.790460642 +0000 UTC m=+186.229585142" Nov 24 13:21:14 crc kubenswrapper[5039]: I1124 13:21:14.740397 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jbzck" event={"ID":"82a55c8b-6639-49c5-b57a-99ed016d7e7c","Type":"ContainerStarted","Data":"e96821832ec97c2500c752dac821c34aa05bdc59a416f2dd87934ce28be67c26"} Nov 24 13:21:14 crc kubenswrapper[5039]: I1124 13:21:14.759952 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jbzck" podStartSLOduration=3.461219584 podStartE2EDuration="32.759935265s" podCreationTimestamp="2025-11-24 13:20:42 +0000 UTC" firstStartedPulling="2025-11-24 13:20:44.458640896 +0000 UTC m=+156.897765396" lastFinishedPulling="2025-11-24 13:21:13.757356577 +0000 UTC m=+186.196481077" observedRunningTime="2025-11-24 13:21:14.757262516 +0000 UTC m=+187.196387036" watchObservedRunningTime="2025-11-24 13:21:14.759935265 +0000 UTC m=+187.199059785" Nov 24 13:21:17 crc kubenswrapper[5039]: I1124 13:21:17.341179 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 13:21:19 crc kubenswrapper[5039]: I1124 13:21:19.726578 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9979w" Nov 24 13:21:19 crc kubenswrapper[5039]: I1124 13:21:19.727115 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-9979w" Nov 24 13:21:20 crc kubenswrapper[5039]: I1124 13:21:20.101877 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:21:20 crc kubenswrapper[5039]: I1124 13:21:20.102119 5039 prober.go:107] "Probe 
failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:21:20 crc kubenswrapper[5039]: I1124 13:21:20.124755 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tdpn4" Nov 24 13:21:20 crc kubenswrapper[5039]: I1124 13:21:20.124813 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tdpn4" Nov 24 13:21:20 crc kubenswrapper[5039]: I1124 13:21:20.144447 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-98dp9" Nov 24 13:21:20 crc kubenswrapper[5039]: I1124 13:21:20.882758 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9979w" Nov 24 13:21:20 crc kubenswrapper[5039]: I1124 13:21:20.882919 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tdpn4" Nov 24 13:21:20 crc kubenswrapper[5039]: I1124 13:21:20.933333 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tdpn4" Nov 24 13:21:20 crc kubenswrapper[5039]: I1124 13:21:20.940763 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9979w" Nov 24 13:21:21 crc kubenswrapper[5039]: I1124 13:21:21.898733 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-cd9cq" Nov 24 13:21:21 crc kubenswrapper[5039]: I1124 13:21:21.899275 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-cd9cq" Nov 24 13:21:21 crc kubenswrapper[5039]: I1124 13:21:21.940977 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-cd9cq" Nov 24 13:21:22 crc kubenswrapper[5039]: I1124 13:21:22.718084 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tdpn4"] Nov 24 13:21:22 crc kubenswrapper[5039]: I1124 13:21:22.788854 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-tdpn4" podUID="30fffdbb-286b-47ad-887b-fd8ec67725d6" containerName="registry-server" containerID="cri-o://6bfb6c03941b7d648256b36f62b2a3db978c5b44b5e117a1383077cfcbc3bd29" gracePeriod=2 Nov 24 13:21:22 crc kubenswrapper[5039]: I1124 13:21:22.846187 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-cd9cq" Nov 24 13:21:23 crc kubenswrapper[5039]: I1124 13:21:23.304801 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jbzck" Nov 24 13:21:23 crc kubenswrapper[5039]: I1124 13:21:23.305823 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jbzck" Nov 24 13:21:23 crc kubenswrapper[5039]: I1124 13:21:23.365666 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jbzck" Nov 24 13:21:23 crc kubenswrapper[5039]: I1124 
13:21:23.806671 5039 generic.go:334] "Generic (PLEG): container finished" podID="30fffdbb-286b-47ad-887b-fd8ec67725d6" containerID="6bfb6c03941b7d648256b36f62b2a3db978c5b44b5e117a1383077cfcbc3bd29" exitCode=0 Nov 24 13:21:23 crc kubenswrapper[5039]: I1124 13:21:23.806720 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdpn4" event={"ID":"30fffdbb-286b-47ad-887b-fd8ec67725d6","Type":"ContainerDied","Data":"6bfb6c03941b7d648256b36f62b2a3db978c5b44b5e117a1383077cfcbc3bd29"} Nov 24 13:21:23 crc kubenswrapper[5039]: I1124 13:21:23.856798 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jbzck" Nov 24 13:21:23 crc kubenswrapper[5039]: I1124 13:21:23.881926 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tdpn4" Nov 24 13:21:23 crc kubenswrapper[5039]: I1124 13:21:23.905013 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30fffdbb-286b-47ad-887b-fd8ec67725d6-utilities\") pod \"30fffdbb-286b-47ad-887b-fd8ec67725d6\" (UID: \"30fffdbb-286b-47ad-887b-fd8ec67725d6\") " Nov 24 13:21:23 crc kubenswrapper[5039]: I1124 13:21:23.905112 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30fffdbb-286b-47ad-887b-fd8ec67725d6-catalog-content\") pod \"30fffdbb-286b-47ad-887b-fd8ec67725d6\" (UID: \"30fffdbb-286b-47ad-887b-fd8ec67725d6\") " Nov 24 13:21:23 crc kubenswrapper[5039]: I1124 13:21:23.905179 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v82zn\" (UniqueName: \"kubernetes.io/projected/30fffdbb-286b-47ad-887b-fd8ec67725d6-kube-api-access-v82zn\") pod \"30fffdbb-286b-47ad-887b-fd8ec67725d6\" (UID: \"30fffdbb-286b-47ad-887b-fd8ec67725d6\") " Nov 24 13:21:23 crc kubenswrapper[5039]: I1124 13:21:23.906482 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/30fffdbb-286b-47ad-887b-fd8ec67725d6-utilities" (OuterVolumeSpecName: "utilities") pod "30fffdbb-286b-47ad-887b-fd8ec67725d6" (UID: "30fffdbb-286b-47ad-887b-fd8ec67725d6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:21:23 crc kubenswrapper[5039]: I1124 13:21:23.912415 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30fffdbb-286b-47ad-887b-fd8ec67725d6-kube-api-access-v82zn" (OuterVolumeSpecName: "kube-api-access-v82zn") pod "30fffdbb-286b-47ad-887b-fd8ec67725d6" (UID: "30fffdbb-286b-47ad-887b-fd8ec67725d6"). InnerVolumeSpecName "kube-api-access-v82zn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:21:23 crc kubenswrapper[5039]: I1124 13:21:23.960691 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/30fffdbb-286b-47ad-887b-fd8ec67725d6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "30fffdbb-286b-47ad-887b-fd8ec67725d6" (UID: "30fffdbb-286b-47ad-887b-fd8ec67725d6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:21:24 crc kubenswrapper[5039]: I1124 13:21:24.006978 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30fffdbb-286b-47ad-887b-fd8ec67725d6-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 13:21:24 crc kubenswrapper[5039]: I1124 13:21:24.007018 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30fffdbb-286b-47ad-887b-fd8ec67725d6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 13:21:24 crc kubenswrapper[5039]: I1124 13:21:24.007037 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v82zn\" (UniqueName: \"kubernetes.io/projected/30fffdbb-286b-47ad-887b-fd8ec67725d6-kube-api-access-v82zn\") on node \"crc\" DevicePath \"\"" Nov 24 13:21:24 crc kubenswrapper[5039]: I1124 13:21:24.816175 5039 generic.go:334] "Generic (PLEG): container finished" podID="8e77b6e7-8d78-4d0d-ab33-c27c4d168b74" containerID="6a05e7f8ebc52828dfdacf5b845b228094ff4d99e2b98a9820f0cec4189613dc" exitCode=0 Nov 24 13:21:24 crc kubenswrapper[5039]: I1124 13:21:24.816675 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hvqj8" event={"ID":"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74","Type":"ContainerDied","Data":"6a05e7f8ebc52828dfdacf5b845b228094ff4d99e2b98a9820f0cec4189613dc"} Nov 24 13:21:24 crc kubenswrapper[5039]: I1124 13:21:24.836844 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdpn4" event={"ID":"30fffdbb-286b-47ad-887b-fd8ec67725d6","Type":"ContainerDied","Data":"4ea77a08f8f662011b569ae6898b5b274584171a3a495cf723c3544630da7f3c"} Nov 24 13:21:24 crc kubenswrapper[5039]: I1124 13:21:24.836893 5039 scope.go:117] "RemoveContainer" containerID="6bfb6c03941b7d648256b36f62b2a3db978c5b44b5e117a1383077cfcbc3bd29" Nov 24 13:21:24 crc kubenswrapper[5039]: I1124 13:21:24.837002 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tdpn4" Nov 24 13:21:24 crc kubenswrapper[5039]: I1124 13:21:24.839977 5039 generic.go:334] "Generic (PLEG): container finished" podID="af4ec023-8129-4c1b-99c4-20e814084d4a" containerID="f81cc69053655aab7cb2e2531f937da9ec1a1da527481788457490ab3bb6bef1" exitCode=0 Nov 24 13:21:24 crc kubenswrapper[5039]: I1124 13:21:24.840027 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x68z7" event={"ID":"af4ec023-8129-4c1b-99c4-20e814084d4a","Type":"ContainerDied","Data":"f81cc69053655aab7cb2e2531f937da9ec1a1da527481788457490ab3bb6bef1"} Nov 24 13:21:24 crc kubenswrapper[5039]: I1124 13:21:24.846139 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-76mnr" event={"ID":"e7514bf7-ccdc-42f0-a159-78d12f91e55c","Type":"ContainerStarted","Data":"9b10b2cb0655af0d0cd45825be01fda50aa6350c85721da7166f4cbd89908925"} Nov 24 13:21:24 crc kubenswrapper[5039]: I1124 13:21:24.864149 5039 generic.go:334] "Generic (PLEG): container finished" podID="6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0" containerID="9626e97704a011c2969781aa6adc89fd936ebf8dcf4d99ca098d60f57bd0225d" exitCode=0 Nov 24 13:21:24 crc kubenswrapper[5039]: I1124 13:21:24.864207 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gj66q" event={"ID":"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0","Type":"ContainerDied","Data":"9626e97704a011c2969781aa6adc89fd936ebf8dcf4d99ca098d60f57bd0225d"} Nov 24 13:21:24 crc kubenswrapper[5039]: I1124 13:21:24.886407 5039 scope.go:117] "RemoveContainer" containerID="e34b8c34954a1e18a0b9c55fbcd0e3750c5bcdb537fc1c1e7178af4b594f2fd5" Nov 24 13:21:24 crc kubenswrapper[5039]: I1124 13:21:24.913610 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tdpn4"] Nov 24 13:21:24 crc kubenswrapper[5039]: I1124 13:21:24.913989 5039 scope.go:117] "RemoveContainer" containerID="f7455a1b83d13bec7c15b846eaa8bda46a116b0629ddc5943c31c825eec0b7eb" Nov 24 13:21:24 crc kubenswrapper[5039]: I1124 13:21:24.918404 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tdpn4"] Nov 24 13:21:25 crc kubenswrapper[5039]: I1124 13:21:25.117038 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jbzck"] Nov 24 13:21:25 crc kubenswrapper[5039]: I1124 13:21:25.878134 5039 generic.go:334] "Generic (PLEG): container finished" podID="e7514bf7-ccdc-42f0-a159-78d12f91e55c" containerID="9b10b2cb0655af0d0cd45825be01fda50aa6350c85721da7166f4cbd89908925" exitCode=0 Nov 24 13:21:25 crc kubenswrapper[5039]: I1124 13:21:25.878177 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-76mnr" event={"ID":"e7514bf7-ccdc-42f0-a159-78d12f91e55c","Type":"ContainerDied","Data":"9b10b2cb0655af0d0cd45825be01fda50aa6350c85721da7166f4cbd89908925"} Nov 24 13:21:26 crc kubenswrapper[5039]: I1124 13:21:26.314889 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30fffdbb-286b-47ad-887b-fd8ec67725d6" path="/var/lib/kubelet/pods/30fffdbb-286b-47ad-887b-fd8ec67725d6/volumes" Nov 24 13:21:26 crc kubenswrapper[5039]: I1124 13:21:26.885126 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-76mnr" 
event={"ID":"e7514bf7-ccdc-42f0-a159-78d12f91e55c","Type":"ContainerStarted","Data":"9cb540a7b572d6ee3e91724abbf04fec807098d0ce229c1d34c9af5a7494e081"} Nov 24 13:21:26 crc kubenswrapper[5039]: I1124 13:21:26.887896 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gj66q" event={"ID":"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0","Type":"ContainerStarted","Data":"72a283df2d53ab5053a44c6896b458a5abf98fbb9974fa3b818661dfe2718e70"} Nov 24 13:21:26 crc kubenswrapper[5039]: I1124 13:21:26.890207 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hvqj8" event={"ID":"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74","Type":"ContainerStarted","Data":"85d65267e4ede8f23d6682411198c7c3fdf02599ca370150d1b66d46e0dc9bf1"} Nov 24 13:21:26 crc kubenswrapper[5039]: I1124 13:21:26.892679 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x68z7" event={"ID":"af4ec023-8129-4c1b-99c4-20e814084d4a","Type":"ContainerStarted","Data":"c91399e795dbd979f837648d3b9f284da98760a6f31c0e4153dd7670e38606a9"} Nov 24 13:21:26 crc kubenswrapper[5039]: I1124 13:21:26.893057 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jbzck" podUID="82a55c8b-6639-49c5-b57a-99ed016d7e7c" containerName="registry-server" containerID="cri-o://e96821832ec97c2500c752dac821c34aa05bdc59a416f2dd87934ce28be67c26" gracePeriod=2 Nov 24 13:21:26 crc kubenswrapper[5039]: I1124 13:21:26.911025 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-76mnr" podStartSLOduration=3.166386189 podStartE2EDuration="47.911004175s" podCreationTimestamp="2025-11-24 13:20:39 +0000 UTC" firstStartedPulling="2025-11-24 13:20:41.227750046 +0000 UTC m=+153.666874546" lastFinishedPulling="2025-11-24 13:21:25.972368032 +0000 UTC m=+198.411492532" observedRunningTime="2025-11-24 13:21:26.90843925 +0000 UTC m=+199.347563750" watchObservedRunningTime="2025-11-24 13:21:26.911004175 +0000 UTC m=+199.350128675" Nov 24 13:21:26 crc kubenswrapper[5039]: I1124 13:21:26.927051 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-gj66q" podStartSLOduration=3.4003951519999998 podStartE2EDuration="45.927036122s" podCreationTimestamp="2025-11-24 13:20:41 +0000 UTC" firstStartedPulling="2025-11-24 13:20:43.379183605 +0000 UTC m=+155.818308105" lastFinishedPulling="2025-11-24 13:21:25.905824575 +0000 UTC m=+198.344949075" observedRunningTime="2025-11-24 13:21:26.925529673 +0000 UTC m=+199.364654173" watchObservedRunningTime="2025-11-24 13:21:26.927036122 +0000 UTC m=+199.366160612" Nov 24 13:21:26 crc kubenswrapper[5039]: I1124 13:21:26.948541 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hvqj8" podStartSLOduration=4.46401437 podStartE2EDuration="47.948526499s" podCreationTimestamp="2025-11-24 13:20:39 +0000 UTC" firstStartedPulling="2025-11-24 13:20:42.302377782 +0000 UTC m=+154.741502282" lastFinishedPulling="2025-11-24 13:21:25.786889901 +0000 UTC m=+198.226014411" observedRunningTime="2025-11-24 13:21:26.947551524 +0000 UTC m=+199.386676024" watchObservedRunningTime="2025-11-24 13:21:26.948526499 +0000 UTC m=+199.387650999" Nov 24 13:21:26 crc kubenswrapper[5039]: I1124 13:21:26.968087 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-operators-x68z7" podStartSLOduration=3.631736331 podStartE2EDuration="44.968066466s" podCreationTimestamp="2025-11-24 13:20:42 +0000 UTC" firstStartedPulling="2025-11-24 13:20:44.473064029 +0000 UTC m=+156.912188529" lastFinishedPulling="2025-11-24 13:21:25.809394164 +0000 UTC m=+198.248518664" observedRunningTime="2025-11-24 13:21:26.965957212 +0000 UTC m=+199.405081712" watchObservedRunningTime="2025-11-24 13:21:26.968066466 +0000 UTC m=+199.407190966" Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.238668 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jbzck" Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.245912 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82a55c8b-6639-49c5-b57a-99ed016d7e7c-catalog-content\") pod \"82a55c8b-6639-49c5-b57a-99ed016d7e7c\" (UID: \"82a55c8b-6639-49c5-b57a-99ed016d7e7c\") " Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.245994 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qz5v7\" (UniqueName: \"kubernetes.io/projected/82a55c8b-6639-49c5-b57a-99ed016d7e7c-kube-api-access-qz5v7\") pod \"82a55c8b-6639-49c5-b57a-99ed016d7e7c\" (UID: \"82a55c8b-6639-49c5-b57a-99ed016d7e7c\") " Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.246023 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82a55c8b-6639-49c5-b57a-99ed016d7e7c-utilities\") pod \"82a55c8b-6639-49c5-b57a-99ed016d7e7c\" (UID: \"82a55c8b-6639-49c5-b57a-99ed016d7e7c\") " Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.247049 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82a55c8b-6639-49c5-b57a-99ed016d7e7c-utilities" (OuterVolumeSpecName: "utilities") pod "82a55c8b-6639-49c5-b57a-99ed016d7e7c" (UID: "82a55c8b-6639-49c5-b57a-99ed016d7e7c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.262351 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82a55c8b-6639-49c5-b57a-99ed016d7e7c-kube-api-access-qz5v7" (OuterVolumeSpecName: "kube-api-access-qz5v7") pod "82a55c8b-6639-49c5-b57a-99ed016d7e7c" (UID: "82a55c8b-6639-49c5-b57a-99ed016d7e7c"). InnerVolumeSpecName "kube-api-access-qz5v7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.347628 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qz5v7\" (UniqueName: \"kubernetes.io/projected/82a55c8b-6639-49c5-b57a-99ed016d7e7c-kube-api-access-qz5v7\") on node \"crc\" DevicePath \"\"" Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.347667 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82a55c8b-6639-49c5-b57a-99ed016d7e7c-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.382873 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82a55c8b-6639-49c5-b57a-99ed016d7e7c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "82a55c8b-6639-49c5-b57a-99ed016d7e7c" (UID: "82a55c8b-6639-49c5-b57a-99ed016d7e7c"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.448882 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82a55c8b-6639-49c5-b57a-99ed016d7e7c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.899346 5039 generic.go:334] "Generic (PLEG): container finished" podID="82a55c8b-6639-49c5-b57a-99ed016d7e7c" containerID="e96821832ec97c2500c752dac821c34aa05bdc59a416f2dd87934ce28be67c26" exitCode=0 Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.899400 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jbzck" event={"ID":"82a55c8b-6639-49c5-b57a-99ed016d7e7c","Type":"ContainerDied","Data":"e96821832ec97c2500c752dac821c34aa05bdc59a416f2dd87934ce28be67c26"} Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.899450 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jbzck" event={"ID":"82a55c8b-6639-49c5-b57a-99ed016d7e7c","Type":"ContainerDied","Data":"096143f81f16ec88dcfa8333133915a216f24cd691fe4e265a8a535cb3a61d39"} Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.899467 5039 scope.go:117] "RemoveContainer" containerID="e96821832ec97c2500c752dac821c34aa05bdc59a416f2dd87934ce28be67c26" Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.899642 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jbzck" Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.925186 5039 scope.go:117] "RemoveContainer" containerID="2c94f9c81127b91e752997fa338a8ee13650853a4c28d36d4ecbaeb5c029339b" Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.926864 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jbzck"] Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.942319 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-jbzck"] Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.961841 5039 scope.go:117] "RemoveContainer" containerID="5046eda285cc8c6c9d033d0e9fe03d98ce8b3e2b30f9a83fb7d2ded0df0da43b" Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.976281 5039 scope.go:117] "RemoveContainer" containerID="e96821832ec97c2500c752dac821c34aa05bdc59a416f2dd87934ce28be67c26" Nov 24 13:21:27 crc kubenswrapper[5039]: E1124 13:21:27.976741 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e96821832ec97c2500c752dac821c34aa05bdc59a416f2dd87934ce28be67c26\": container with ID starting with e96821832ec97c2500c752dac821c34aa05bdc59a416f2dd87934ce28be67c26 not found: ID does not exist" containerID="e96821832ec97c2500c752dac821c34aa05bdc59a416f2dd87934ce28be67c26" Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.976775 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e96821832ec97c2500c752dac821c34aa05bdc59a416f2dd87934ce28be67c26"} err="failed to get container status \"e96821832ec97c2500c752dac821c34aa05bdc59a416f2dd87934ce28be67c26\": rpc error: code = NotFound desc = could not find container \"e96821832ec97c2500c752dac821c34aa05bdc59a416f2dd87934ce28be67c26\": container with ID starting with e96821832ec97c2500c752dac821c34aa05bdc59a416f2dd87934ce28be67c26 not found: ID 
does not exist" Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.976802 5039 scope.go:117] "RemoveContainer" containerID="2c94f9c81127b91e752997fa338a8ee13650853a4c28d36d4ecbaeb5c029339b" Nov 24 13:21:27 crc kubenswrapper[5039]: E1124 13:21:27.977157 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c94f9c81127b91e752997fa338a8ee13650853a4c28d36d4ecbaeb5c029339b\": container with ID starting with 2c94f9c81127b91e752997fa338a8ee13650853a4c28d36d4ecbaeb5c029339b not found: ID does not exist" containerID="2c94f9c81127b91e752997fa338a8ee13650853a4c28d36d4ecbaeb5c029339b" Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.977174 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c94f9c81127b91e752997fa338a8ee13650853a4c28d36d4ecbaeb5c029339b"} err="failed to get container status \"2c94f9c81127b91e752997fa338a8ee13650853a4c28d36d4ecbaeb5c029339b\": rpc error: code = NotFound desc = could not find container \"2c94f9c81127b91e752997fa338a8ee13650853a4c28d36d4ecbaeb5c029339b\": container with ID starting with 2c94f9c81127b91e752997fa338a8ee13650853a4c28d36d4ecbaeb5c029339b not found: ID does not exist" Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.977188 5039 scope.go:117] "RemoveContainer" containerID="5046eda285cc8c6c9d033d0e9fe03d98ce8b3e2b30f9a83fb7d2ded0df0da43b" Nov 24 13:21:27 crc kubenswrapper[5039]: E1124 13:21:27.977387 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5046eda285cc8c6c9d033d0e9fe03d98ce8b3e2b30f9a83fb7d2ded0df0da43b\": container with ID starting with 5046eda285cc8c6c9d033d0e9fe03d98ce8b3e2b30f9a83fb7d2ded0df0da43b not found: ID does not exist" containerID="5046eda285cc8c6c9d033d0e9fe03d98ce8b3e2b30f9a83fb7d2ded0df0da43b" Nov 24 13:21:27 crc kubenswrapper[5039]: I1124 13:21:27.977403 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5046eda285cc8c6c9d033d0e9fe03d98ce8b3e2b30f9a83fb7d2ded0df0da43b"} err="failed to get container status \"5046eda285cc8c6c9d033d0e9fe03d98ce8b3e2b30f9a83fb7d2ded0df0da43b\": rpc error: code = NotFound desc = could not find container \"5046eda285cc8c6c9d033d0e9fe03d98ce8b3e2b30f9a83fb7d2ded0df0da43b\": container with ID starting with 5046eda285cc8c6c9d033d0e9fe03d98ce8b3e2b30f9a83fb7d2ded0df0da43b not found: ID does not exist" Nov 24 13:21:28 crc kubenswrapper[5039]: I1124 13:21:28.315625 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82a55c8b-6639-49c5-b57a-99ed016d7e7c" path="/var/lib/kubelet/pods/82a55c8b-6639-49c5-b57a-99ed016d7e7c/volumes" Nov 24 13:21:29 crc kubenswrapper[5039]: I1124 13:21:29.909705 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-76mnr" Nov 24 13:21:29 crc kubenswrapper[5039]: I1124 13:21:29.910297 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-76mnr" Nov 24 13:21:29 crc kubenswrapper[5039]: I1124 13:21:29.966558 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-76mnr" Nov 24 13:21:30 crc kubenswrapper[5039]: I1124 13:21:30.347669 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hvqj8" Nov 24 13:21:30 crc kubenswrapper[5039]: I1124 
13:21:30.347728 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-hvqj8" Nov 24 13:21:30 crc kubenswrapper[5039]: I1124 13:21:30.396095 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-hvqj8" Nov 24 13:21:30 crc kubenswrapper[5039]: I1124 13:21:30.969425 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hvqj8" Nov 24 13:21:31 crc kubenswrapper[5039]: I1124 13:21:31.973021 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-76mnr" Nov 24 13:21:32 crc kubenswrapper[5039]: I1124 13:21:32.328176 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-gj66q" Nov 24 13:21:32 crc kubenswrapper[5039]: I1124 13:21:32.328214 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-gj66q" Nov 24 13:21:32 crc kubenswrapper[5039]: I1124 13:21:32.368935 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-gj66q" Nov 24 13:21:32 crc kubenswrapper[5039]: I1124 13:21:32.978235 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-gj66q" Nov 24 13:21:33 crc kubenswrapper[5039]: I1124 13:21:33.004082 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-x68z7" Nov 24 13:21:33 crc kubenswrapper[5039]: I1124 13:21:33.004128 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-x68z7" Nov 24 13:21:33 crc kubenswrapper[5039]: I1124 13:21:33.051227 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-x68z7" Nov 24 13:21:33 crc kubenswrapper[5039]: I1124 13:21:33.711669 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hvqj8"] Nov 24 13:21:33 crc kubenswrapper[5039]: I1124 13:21:33.712490 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hvqj8" podUID="8e77b6e7-8d78-4d0d-ab33-c27c4d168b74" containerName="registry-server" containerID="cri-o://85d65267e4ede8f23d6682411198c7c3fdf02599ca370150d1b66d46e0dc9bf1" gracePeriod=2 Nov 24 13:21:33 crc kubenswrapper[5039]: I1124 13:21:33.942703 5039 generic.go:334] "Generic (PLEG): container finished" podID="8e77b6e7-8d78-4d0d-ab33-c27c4d168b74" containerID="85d65267e4ede8f23d6682411198c7c3fdf02599ca370150d1b66d46e0dc9bf1" exitCode=0 Nov 24 13:21:33 crc kubenswrapper[5039]: I1124 13:21:33.942776 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hvqj8" event={"ID":"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74","Type":"ContainerDied","Data":"85d65267e4ede8f23d6682411198c7c3fdf02599ca370150d1b66d46e0dc9bf1"} Nov 24 13:21:34 crc kubenswrapper[5039]: I1124 13:21:34.017021 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-x68z7" Nov 24 13:21:34 crc kubenswrapper[5039]: I1124 13:21:34.062338 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hvqj8" Nov 24 13:21:34 crc kubenswrapper[5039]: I1124 13:21:34.252207 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gz8gz\" (UniqueName: \"kubernetes.io/projected/8e77b6e7-8d78-4d0d-ab33-c27c4d168b74-kube-api-access-gz8gz\") pod \"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74\" (UID: \"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74\") " Nov 24 13:21:34 crc kubenswrapper[5039]: I1124 13:21:34.252278 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e77b6e7-8d78-4d0d-ab33-c27c4d168b74-utilities\") pod \"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74\" (UID: \"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74\") " Nov 24 13:21:34 crc kubenswrapper[5039]: I1124 13:21:34.252353 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e77b6e7-8d78-4d0d-ab33-c27c4d168b74-catalog-content\") pod \"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74\" (UID: \"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74\") " Nov 24 13:21:34 crc kubenswrapper[5039]: I1124 13:21:34.253282 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e77b6e7-8d78-4d0d-ab33-c27c4d168b74-utilities" (OuterVolumeSpecName: "utilities") pod "8e77b6e7-8d78-4d0d-ab33-c27c4d168b74" (UID: "8e77b6e7-8d78-4d0d-ab33-c27c4d168b74"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:21:34 crc kubenswrapper[5039]: I1124 13:21:34.257161 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e77b6e7-8d78-4d0d-ab33-c27c4d168b74-kube-api-access-gz8gz" (OuterVolumeSpecName: "kube-api-access-gz8gz") pod "8e77b6e7-8d78-4d0d-ab33-c27c4d168b74" (UID: "8e77b6e7-8d78-4d0d-ab33-c27c4d168b74"). InnerVolumeSpecName "kube-api-access-gz8gz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:21:34 crc kubenswrapper[5039]: I1124 13:21:34.298352 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e77b6e7-8d78-4d0d-ab33-c27c4d168b74-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8e77b6e7-8d78-4d0d-ab33-c27c4d168b74" (UID: "8e77b6e7-8d78-4d0d-ab33-c27c4d168b74"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:21:34 crc kubenswrapper[5039]: I1124 13:21:34.353920 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e77b6e7-8d78-4d0d-ab33-c27c4d168b74-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 13:21:34 crc kubenswrapper[5039]: I1124 13:21:34.353958 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gz8gz\" (UniqueName: \"kubernetes.io/projected/8e77b6e7-8d78-4d0d-ab33-c27c4d168b74-kube-api-access-gz8gz\") on node \"crc\" DevicePath \"\"" Nov 24 13:21:34 crc kubenswrapper[5039]: I1124 13:21:34.353968 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e77b6e7-8d78-4d0d-ab33-c27c4d168b74-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 13:21:34 crc kubenswrapper[5039]: I1124 13:21:34.951017 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hvqj8" event={"ID":"8e77b6e7-8d78-4d0d-ab33-c27c4d168b74","Type":"ContainerDied","Data":"6f21ef6000a028a0df48bc3322c7e896cdbf303ea9b5f0b2300034efd9e1af65"} Nov 24 13:21:34 crc kubenswrapper[5039]: I1124 13:21:34.951770 5039 scope.go:117] "RemoveContainer" containerID="85d65267e4ede8f23d6682411198c7c3fdf02599ca370150d1b66d46e0dc9bf1" Nov 24 13:21:34 crc kubenswrapper[5039]: I1124 13:21:34.951043 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hvqj8" Nov 24 13:21:34 crc kubenswrapper[5039]: I1124 13:21:34.966956 5039 scope.go:117] "RemoveContainer" containerID="6a05e7f8ebc52828dfdacf5b845b228094ff4d99e2b98a9820f0cec4189613dc" Nov 24 13:21:34 crc kubenswrapper[5039]: I1124 13:21:34.970809 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hvqj8"] Nov 24 13:21:34 crc kubenswrapper[5039]: I1124 13:21:34.973901 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hvqj8"] Nov 24 13:21:34 crc kubenswrapper[5039]: I1124 13:21:34.982119 5039 scope.go:117] "RemoveContainer" containerID="8d5096b6f4b8705d2e77a594052ed82d7a7dd5420809393dc720b4c2ca46ddae" Nov 24 13:21:36 crc kubenswrapper[5039]: I1124 13:21:36.110729 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gj66q"] Nov 24 13:21:36 crc kubenswrapper[5039]: I1124 13:21:36.111314 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-gj66q" podUID="6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0" containerName="registry-server" containerID="cri-o://72a283df2d53ab5053a44c6896b458a5abf98fbb9974fa3b818661dfe2718e70" gracePeriod=2 Nov 24 13:21:36 crc kubenswrapper[5039]: I1124 13:21:36.312460 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e77b6e7-8d78-4d0d-ab33-c27c4d168b74" path="/var/lib/kubelet/pods/8e77b6e7-8d78-4d0d-ab33-c27c4d168b74/volumes" Nov 24 13:21:36 crc kubenswrapper[5039]: I1124 13:21:36.969317 5039 generic.go:334] "Generic (PLEG): container finished" podID="6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0" containerID="72a283df2d53ab5053a44c6896b458a5abf98fbb9974fa3b818661dfe2718e70" exitCode=0 Nov 24 13:21:36 crc kubenswrapper[5039]: I1124 13:21:36.969359 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gj66q" 
event={"ID":"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0","Type":"ContainerDied","Data":"72a283df2d53ab5053a44c6896b458a5abf98fbb9974fa3b818661dfe2718e70"} Nov 24 13:21:37 crc kubenswrapper[5039]: I1124 13:21:37.317898 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gj66q" Nov 24 13:21:37 crc kubenswrapper[5039]: I1124 13:21:37.489363 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0-catalog-content\") pod \"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0\" (UID: \"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0\") " Nov 24 13:21:37 crc kubenswrapper[5039]: I1124 13:21:37.489481 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kr676\" (UniqueName: \"kubernetes.io/projected/6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0-kube-api-access-kr676\") pod \"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0\" (UID: \"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0\") " Nov 24 13:21:37 crc kubenswrapper[5039]: I1124 13:21:37.489612 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0-utilities\") pod \"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0\" (UID: \"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0\") " Nov 24 13:21:37 crc kubenswrapper[5039]: I1124 13:21:37.491582 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0-utilities" (OuterVolumeSpecName: "utilities") pod "6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0" (UID: "6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:21:37 crc kubenswrapper[5039]: I1124 13:21:37.501448 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0-kube-api-access-kr676" (OuterVolumeSpecName: "kube-api-access-kr676") pod "6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0" (UID: "6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0"). InnerVolumeSpecName "kube-api-access-kr676". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:21:37 crc kubenswrapper[5039]: I1124 13:21:37.506450 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0" (UID: "6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:21:37 crc kubenswrapper[5039]: I1124 13:21:37.591349 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 13:21:37 crc kubenswrapper[5039]: I1124 13:21:37.591420 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kr676\" (UniqueName: \"kubernetes.io/projected/6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0-kube-api-access-kr676\") on node \"crc\" DevicePath \"\"" Nov 24 13:21:37 crc kubenswrapper[5039]: I1124 13:21:37.591439 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 13:21:37 crc kubenswrapper[5039]: I1124 13:21:37.976239 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gj66q" event={"ID":"6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0","Type":"ContainerDied","Data":"095dd7e2d7557ac06e0eb209042efb0a172806c4a8ad1af7bd78fbcab335a91b"} Nov 24 13:21:37 crc kubenswrapper[5039]: I1124 13:21:37.976299 5039 scope.go:117] "RemoveContainer" containerID="72a283df2d53ab5053a44c6896b458a5abf98fbb9974fa3b818661dfe2718e70" Nov 24 13:21:37 crc kubenswrapper[5039]: I1124 13:21:37.976553 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gj66q" Nov 24 13:21:37 crc kubenswrapper[5039]: I1124 13:21:37.991534 5039 scope.go:117] "RemoveContainer" containerID="9626e97704a011c2969781aa6adc89fd936ebf8dcf4d99ca098d60f57bd0225d" Nov 24 13:21:38 crc kubenswrapper[5039]: I1124 13:21:38.010909 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gj66q"] Nov 24 13:21:38 crc kubenswrapper[5039]: I1124 13:21:38.014178 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-gj66q"] Nov 24 13:21:38 crc kubenswrapper[5039]: I1124 13:21:38.019909 5039 scope.go:117] "RemoveContainer" containerID="ceac1b433e91998ccdc6abbf9823554887915bc49c970fbc896a9fee01460390" Nov 24 13:21:38 crc kubenswrapper[5039]: I1124 13:21:38.312588 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0" path="/var/lib/kubelet/pods/6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0/volumes" Nov 24 13:21:50 crc kubenswrapper[5039]: I1124 13:21:50.102170 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:21:50 crc kubenswrapper[5039]: I1124 13:21:50.103016 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:21:50 crc kubenswrapper[5039]: I1124 13:21:50.103088 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:21:50 crc kubenswrapper[5039]: I1124 13:21:50.103985 5039 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 13:21:50 crc kubenswrapper[5039]: I1124 13:21:50.104146 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488" gracePeriod=600 Nov 24 13:21:50 crc kubenswrapper[5039]: I1124 13:21:50.691230 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-b8b2f"] Nov 24 13:21:51 crc kubenswrapper[5039]: I1124 13:21:51.041793 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488" exitCode=0 Nov 24 13:21:51 crc kubenswrapper[5039]: I1124 13:21:51.041859 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488"} Nov 24 13:21:51 crc kubenswrapper[5039]: I1124 13:21:51.042149 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"ded1533ed2e79a9e9a4e41890abd61e8a4fb5cb75217c4bf2484efe880ce1e04"} Nov 24 13:22:15 crc kubenswrapper[5039]: I1124 13:22:15.736094 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" podUID="b073719c-394b-496f-9d64-75681184acb0" containerName="oauth-openshift" containerID="cri-o://47402c0c79009b511125125e5a954a1ce7950d3777b9e7122d9b0ca1c92b9535" gracePeriod=15 Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.115086 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.156658 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-786b6d57dd-xjqfs"] Nov 24 13:22:16 crc kubenswrapper[5039]: E1124 13:22:16.156835 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30fffdbb-286b-47ad-887b-fd8ec67725d6" containerName="registry-server" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.156847 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="30fffdbb-286b-47ad-887b-fd8ec67725d6" containerName="registry-server" Nov 24 13:22:16 crc kubenswrapper[5039]: E1124 13:22:16.156857 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0" containerName="extract-content" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.156862 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0" containerName="extract-content" Nov 24 13:22:16 crc kubenswrapper[5039]: E1124 13:22:16.156871 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82a55c8b-6639-49c5-b57a-99ed016d7e7c" containerName="extract-content" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.156876 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="82a55c8b-6639-49c5-b57a-99ed016d7e7c" containerName="extract-content" Nov 24 13:22:16 crc kubenswrapper[5039]: E1124 13:22:16.156884 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69a0e706-e6ab-4dee-bffa-5dd23c530205" containerName="pruner" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.156890 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="69a0e706-e6ab-4dee-bffa-5dd23c530205" containerName="pruner" Nov 24 13:22:16 crc kubenswrapper[5039]: E1124 13:22:16.156896 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0" containerName="extract-utilities" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.156902 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0" containerName="extract-utilities" Nov 24 13:22:16 crc kubenswrapper[5039]: E1124 13:22:16.156911 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0" containerName="registry-server" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.156916 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0" containerName="registry-server" Nov 24 13:22:16 crc kubenswrapper[5039]: E1124 13:22:16.156926 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e77b6e7-8d78-4d0d-ab33-c27c4d168b74" containerName="extract-content" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.156932 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e77b6e7-8d78-4d0d-ab33-c27c4d168b74" containerName="extract-content" Nov 24 13:22:16 crc kubenswrapper[5039]: E1124 13:22:16.156940 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b073719c-394b-496f-9d64-75681184acb0" containerName="oauth-openshift" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.156945 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b073719c-394b-496f-9d64-75681184acb0" containerName="oauth-openshift" Nov 24 13:22:16 crc kubenswrapper[5039]: E1124 13:22:16.156952 5039 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="b572c340-7119-4771-82b7-44f475fd9c82" containerName="pruner" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.156958 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b572c340-7119-4771-82b7-44f475fd9c82" containerName="pruner" Nov 24 13:22:16 crc kubenswrapper[5039]: E1124 13:22:16.156966 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e77b6e7-8d78-4d0d-ab33-c27c4d168b74" containerName="registry-server" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.156972 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e77b6e7-8d78-4d0d-ab33-c27c4d168b74" containerName="registry-server" Nov 24 13:22:16 crc kubenswrapper[5039]: E1124 13:22:16.156981 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30fffdbb-286b-47ad-887b-fd8ec67725d6" containerName="extract-content" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.156986 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="30fffdbb-286b-47ad-887b-fd8ec67725d6" containerName="extract-content" Nov 24 13:22:16 crc kubenswrapper[5039]: E1124 13:22:16.156995 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e77b6e7-8d78-4d0d-ab33-c27c4d168b74" containerName="extract-utilities" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.157001 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e77b6e7-8d78-4d0d-ab33-c27c4d168b74" containerName="extract-utilities" Nov 24 13:22:16 crc kubenswrapper[5039]: E1124 13:22:16.157008 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82a55c8b-6639-49c5-b57a-99ed016d7e7c" containerName="extract-utilities" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.157014 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="82a55c8b-6639-49c5-b57a-99ed016d7e7c" containerName="extract-utilities" Nov 24 13:22:16 crc kubenswrapper[5039]: E1124 13:22:16.157020 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30fffdbb-286b-47ad-887b-fd8ec67725d6" containerName="extract-utilities" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.157026 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="30fffdbb-286b-47ad-887b-fd8ec67725d6" containerName="extract-utilities" Nov 24 13:22:16 crc kubenswrapper[5039]: E1124 13:22:16.157032 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6170f687-30e2-44b0-860e-ddcee4e4f2d4" containerName="collect-profiles" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.157037 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="6170f687-30e2-44b0-860e-ddcee4e4f2d4" containerName="collect-profiles" Nov 24 13:22:16 crc kubenswrapper[5039]: E1124 13:22:16.157043 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82a55c8b-6639-49c5-b57a-99ed016d7e7c" containerName="registry-server" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.157049 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="82a55c8b-6639-49c5-b57a-99ed016d7e7c" containerName="registry-server" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.157124 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="b073719c-394b-496f-9d64-75681184acb0" containerName="oauth-openshift" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.157135 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fbb9ba5-2322-4ee1-bf8f-ddb32b535ef0" containerName="registry-server" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.157145 5039 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="30fffdbb-286b-47ad-887b-fd8ec67725d6" containerName="registry-server" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.157153 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="82a55c8b-6639-49c5-b57a-99ed016d7e7c" containerName="registry-server" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.157161 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="b572c340-7119-4771-82b7-44f475fd9c82" containerName="pruner" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.157168 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="69a0e706-e6ab-4dee-bffa-5dd23c530205" containerName="pruner" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.157174 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="6170f687-30e2-44b0-860e-ddcee4e4f2d4" containerName="collect-profiles" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.157184 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e77b6e7-8d78-4d0d-ab33-c27c4d168b74" containerName="registry-server" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.157524 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.176020 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-786b6d57dd-xjqfs"] Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.200801 5039 generic.go:334] "Generic (PLEG): container finished" podID="b073719c-394b-496f-9d64-75681184acb0" containerID="47402c0c79009b511125125e5a954a1ce7950d3777b9e7122d9b0ca1c92b9535" exitCode=0 Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.200870 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" event={"ID":"b073719c-394b-496f-9d64-75681184acb0","Type":"ContainerDied","Data":"47402c0c79009b511125125e5a954a1ce7950d3777b9e7122d9b0ca1c92b9535"} Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.200914 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" event={"ID":"b073719c-394b-496f-9d64-75681184acb0","Type":"ContainerDied","Data":"0366868a7747af71dafe199edf7949c6e099b4d86f888f34fbc62d142c8a8c06"} Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.200946 5039 scope.go:117] "RemoveContainer" containerID="47402c0c79009b511125125e5a954a1ce7950d3777b9e7122d9b0ca1c92b9535" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.201132 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-b8b2f" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.225477 5039 scope.go:117] "RemoveContainer" containerID="47402c0c79009b511125125e5a954a1ce7950d3777b9e7122d9b0ca1c92b9535" Nov 24 13:22:16 crc kubenswrapper[5039]: E1124 13:22:16.226089 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47402c0c79009b511125125e5a954a1ce7950d3777b9e7122d9b0ca1c92b9535\": container with ID starting with 47402c0c79009b511125125e5a954a1ce7950d3777b9e7122d9b0ca1c92b9535 not found: ID does not exist" containerID="47402c0c79009b511125125e5a954a1ce7950d3777b9e7122d9b0ca1c92b9535" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.226165 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47402c0c79009b511125125e5a954a1ce7950d3777b9e7122d9b0ca1c92b9535"} err="failed to get container status \"47402c0c79009b511125125e5a954a1ce7950d3777b9e7122d9b0ca1c92b9535\": rpc error: code = NotFound desc = could not find container \"47402c0c79009b511125125e5a954a1ce7950d3777b9e7122d9b0ca1c92b9535\": container with ID starting with 47402c0c79009b511125125e5a954a1ce7950d3777b9e7122d9b0ca1c92b9535 not found: ID does not exist" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.303793 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-session\") pod \"b073719c-394b-496f-9d64-75681184acb0\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.303848 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b073719c-394b-496f-9d64-75681184acb0-audit-dir\") pod \"b073719c-394b-496f-9d64-75681184acb0\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.303916 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6wx67\" (UniqueName: \"kubernetes.io/projected/b073719c-394b-496f-9d64-75681184acb0-kube-api-access-6wx67\") pod \"b073719c-394b-496f-9d64-75681184acb0\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.303950 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-template-error\") pod \"b073719c-394b-496f-9d64-75681184acb0\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.303982 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-service-ca\") pod \"b073719c-394b-496f-9d64-75681184acb0\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.304025 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-trusted-ca-bundle\") pod \"b073719c-394b-496f-9d64-75681184acb0\" (UID: 
\"b073719c-394b-496f-9d64-75681184acb0\") " Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.304052 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-template-login\") pod \"b073719c-394b-496f-9d64-75681184acb0\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.304076 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-idp-0-file-data\") pod \"b073719c-394b-496f-9d64-75681184acb0\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.304128 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-router-certs\") pod \"b073719c-394b-496f-9d64-75681184acb0\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.304154 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-template-provider-selection\") pod \"b073719c-394b-496f-9d64-75681184acb0\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.304183 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-cliconfig\") pod \"b073719c-394b-496f-9d64-75681184acb0\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.304208 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-audit-policies\") pod \"b073719c-394b-496f-9d64-75681184acb0\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.304237 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-ocp-branding-template\") pod \"b073719c-394b-496f-9d64-75681184acb0\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.304260 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-serving-cert\") pod \"b073719c-394b-496f-9d64-75681184acb0\" (UID: \"b073719c-394b-496f-9d64-75681184acb0\") " Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.304420 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-session\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " 
pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.304483 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-router-certs\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.304534 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a5034c38-f0b5-4191-8172-940cf1526202-audit-policies\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.304558 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.304584 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-cliconfig\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.304615 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4npp5\" (UniqueName: \"kubernetes.io/projected/a5034c38-f0b5-4191-8172-940cf1526202-kube-api-access-4npp5\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.304637 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-user-template-error\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.304677 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-service-ca\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.305317 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a5034c38-f0b5-4191-8172-940cf1526202-audit-dir\") pod 
\"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.305316 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b073719c-394b-496f-9d64-75681184acb0-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "b073719c-394b-496f-9d64-75681184acb0" (UID: "b073719c-394b-496f-9d64-75681184acb0"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.305344 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.305405 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-serving-cert\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.305480 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.305520 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-user-template-login\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.305540 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.305585 5039 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b073719c-394b-496f-9d64-75681184acb0-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.305580 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "b073719c-394b-496f-9d64-75681184acb0" (UID: "b073719c-394b-496f-9d64-75681184acb0"). 
InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.305713 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "b073719c-394b-496f-9d64-75681184acb0" (UID: "b073719c-394b-496f-9d64-75681184acb0"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.306057 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "b073719c-394b-496f-9d64-75681184acb0" (UID: "b073719c-394b-496f-9d64-75681184acb0"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.306093 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "b073719c-394b-496f-9d64-75681184acb0" (UID: "b073719c-394b-496f-9d64-75681184acb0"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.310840 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "b073719c-394b-496f-9d64-75681184acb0" (UID: "b073719c-394b-496f-9d64-75681184acb0"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.311033 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b073719c-394b-496f-9d64-75681184acb0-kube-api-access-6wx67" (OuterVolumeSpecName: "kube-api-access-6wx67") pod "b073719c-394b-496f-9d64-75681184acb0" (UID: "b073719c-394b-496f-9d64-75681184acb0"). InnerVolumeSpecName "kube-api-access-6wx67". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.311906 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "b073719c-394b-496f-9d64-75681184acb0" (UID: "b073719c-394b-496f-9d64-75681184acb0"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.312284 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "b073719c-394b-496f-9d64-75681184acb0" (UID: "b073719c-394b-496f-9d64-75681184acb0"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.312488 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "b073719c-394b-496f-9d64-75681184acb0" (UID: "b073719c-394b-496f-9d64-75681184acb0"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.312923 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "b073719c-394b-496f-9d64-75681184acb0" (UID: "b073719c-394b-496f-9d64-75681184acb0"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.314129 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "b073719c-394b-496f-9d64-75681184acb0" (UID: "b073719c-394b-496f-9d64-75681184acb0"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.314198 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "b073719c-394b-496f-9d64-75681184acb0" (UID: "b073719c-394b-496f-9d64-75681184acb0"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.315800 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "b073719c-394b-496f-9d64-75681184acb0" (UID: "b073719c-394b-496f-9d64-75681184acb0"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.407395 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.407494 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-serving-cert\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.407614 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.407662 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-user-template-login\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.407707 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.407765 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-session\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.407808 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-router-certs\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.407853 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a5034c38-f0b5-4191-8172-940cf1526202-audit-policies\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " 
pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.407901 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.407955 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-cliconfig\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.408188 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-user-template-error\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.408543 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4npp5\" (UniqueName: \"kubernetes.io/projected/a5034c38-f0b5-4191-8172-940cf1526202-kube-api-access-4npp5\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.408626 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-service-ca\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.408696 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a5034c38-f0b5-4191-8172-940cf1526202-audit-dir\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.408797 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6wx67\" (UniqueName: \"kubernetes.io/projected/b073719c-394b-496f-9d64-75681184acb0-kube-api-access-6wx67\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.408829 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.408858 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" 
Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.408892 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.408919 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.408944 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.408971 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.408999 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.409026 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.409051 5039 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b073719c-394b-496f-9d64-75681184acb0-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.409076 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.409103 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.409129 5039 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b073719c-394b-496f-9d64-75681184acb0-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.409220 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a5034c38-f0b5-4191-8172-940cf1526202-audit-dir\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.411156 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" 
(UniqueName: \"kubernetes.io/configmap/a5034c38-f0b5-4191-8172-940cf1526202-audit-policies\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.412141 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.412408 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-cliconfig\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.413372 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-service-ca\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.414118 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.414171 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.415186 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-user-template-login\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.416346 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-session\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.419106 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-router-certs\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.419245 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-user-template-error\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.420392 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-serving-cert\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.423194 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a5034c38-f0b5-4191-8172-940cf1526202-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.438149 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4npp5\" (UniqueName: \"kubernetes.io/projected/a5034c38-f0b5-4191-8172-940cf1526202-kube-api-access-4npp5\") pod \"oauth-openshift-786b6d57dd-xjqfs\" (UID: \"a5034c38-f0b5-4191-8172-940cf1526202\") " pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.475088 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.536304 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-b8b2f"] Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.539516 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-b8b2f"] Nov 24 13:22:16 crc kubenswrapper[5039]: I1124 13:22:16.676645 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-786b6d57dd-xjqfs"] Nov 24 13:22:17 crc kubenswrapper[5039]: I1124 13:22:17.207781 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" event={"ID":"a5034c38-f0b5-4191-8172-940cf1526202","Type":"ContainerStarted","Data":"fcb05f258eb70ead77475bf2e6347d7da44a017d4bc5a953652f7bc3aa68223f"} Nov 24 13:22:17 crc kubenswrapper[5039]: I1124 13:22:17.207860 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" event={"ID":"a5034c38-f0b5-4191-8172-940cf1526202","Type":"ContainerStarted","Data":"3a77062018b39a1b084c2e32f28267ea52cd642e938712c0bf559bd856833760"} Nov 24 13:22:17 crc kubenswrapper[5039]: I1124 13:22:17.207992 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:17 crc kubenswrapper[5039]: I1124 13:22:17.227968 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" podStartSLOduration=27.227938351 podStartE2EDuration="27.227938351s" podCreationTimestamp="2025-11-24 13:21:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:22:17.226075423 +0000 UTC m=+249.665199993" watchObservedRunningTime="2025-11-24 13:22:17.227938351 +0000 UTC m=+249.667062891" Nov 24 13:22:17 crc kubenswrapper[5039]: I1124 13:22:17.467632 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-786b6d57dd-xjqfs" Nov 24 13:22:18 crc kubenswrapper[5039]: I1124 13:22:18.334235 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b073719c-394b-496f-9d64-75681184acb0" path="/var/lib/kubelet/pods/b073719c-394b-496f-9d64-75681184acb0/volumes" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.334199 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-76mnr"] Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.336205 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-76mnr" podUID="e7514bf7-ccdc-42f0-a159-78d12f91e55c" containerName="registry-server" containerID="cri-o://9cb540a7b572d6ee3e91724abbf04fec807098d0ce229c1d34c9af5a7494e081" gracePeriod=30 Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.345583 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9979w"] Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.346224 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-9979w" podUID="aab10f23-6223-4554-9a20-3669e7e0eb72" containerName="registry-server" 
containerID="cri-o://8be8aaadba9972bbaa2b66e15014a9539cef6ccc900d4df7e4f978e223263e18" gracePeriod=30 Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.367362 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-k884h"] Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.367618 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-k884h" podUID="4637ec55-c9ee-48a4-9351-6a382efe4c91" containerName="marketplace-operator" containerID="cri-o://d2b067a7ef2146dd02670527143ed669da4760a9b86427a5bbc9b9dc03bbae94" gracePeriod=30 Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.371628 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cd9cq"] Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.371925 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-cd9cq" podUID="37739944-4511-4fe7-95df-09d42974532e" containerName="registry-server" containerID="cri-o://c81b97a4494d2c4b7570562579a70ede5220900d52bbcfcfe993736d2baf701f" gracePeriod=30 Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.376714 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-x68z7"] Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.377029 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-x68z7" podUID="af4ec023-8129-4c1b-99c4-20e814084d4a" containerName="registry-server" containerID="cri-o://c91399e795dbd979f837648d3b9f284da98760a6f31c0e4153dd7670e38606a9" gracePeriod=30 Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.378001 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zhpf5"] Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.379035 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-zhpf5" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.381597 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zhpf5"] Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.453225 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c0bf8d9e-d6fb-400d-8fa2-d547a9a64107-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-zhpf5\" (UID: \"c0bf8d9e-d6fb-400d-8fa2-d547a9a64107\") " pod="openshift-marketplace/marketplace-operator-79b997595-zhpf5" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.453274 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgr5q\" (UniqueName: \"kubernetes.io/projected/c0bf8d9e-d6fb-400d-8fa2-d547a9a64107-kube-api-access-sgr5q\") pod \"marketplace-operator-79b997595-zhpf5\" (UID: \"c0bf8d9e-d6fb-400d-8fa2-d547a9a64107\") " pod="openshift-marketplace/marketplace-operator-79b997595-zhpf5" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.453307 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c0bf8d9e-d6fb-400d-8fa2-d547a9a64107-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-zhpf5\" (UID: \"c0bf8d9e-d6fb-400d-8fa2-d547a9a64107\") " pod="openshift-marketplace/marketplace-operator-79b997595-zhpf5" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.555087 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c0bf8d9e-d6fb-400d-8fa2-d547a9a64107-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-zhpf5\" (UID: \"c0bf8d9e-d6fb-400d-8fa2-d547a9a64107\") " pod="openshift-marketplace/marketplace-operator-79b997595-zhpf5" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.555143 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgr5q\" (UniqueName: \"kubernetes.io/projected/c0bf8d9e-d6fb-400d-8fa2-d547a9a64107-kube-api-access-sgr5q\") pod \"marketplace-operator-79b997595-zhpf5\" (UID: \"c0bf8d9e-d6fb-400d-8fa2-d547a9a64107\") " pod="openshift-marketplace/marketplace-operator-79b997595-zhpf5" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.555178 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c0bf8d9e-d6fb-400d-8fa2-d547a9a64107-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-zhpf5\" (UID: \"c0bf8d9e-d6fb-400d-8fa2-d547a9a64107\") " pod="openshift-marketplace/marketplace-operator-79b997595-zhpf5" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.558265 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c0bf8d9e-d6fb-400d-8fa2-d547a9a64107-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-zhpf5\" (UID: \"c0bf8d9e-d6fb-400d-8fa2-d547a9a64107\") " pod="openshift-marketplace/marketplace-operator-79b997595-zhpf5" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.565175 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/c0bf8d9e-d6fb-400d-8fa2-d547a9a64107-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-zhpf5\" (UID: \"c0bf8d9e-d6fb-400d-8fa2-d547a9a64107\") " pod="openshift-marketplace/marketplace-operator-79b997595-zhpf5" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.573047 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgr5q\" (UniqueName: \"kubernetes.io/projected/c0bf8d9e-d6fb-400d-8fa2-d547a9a64107-kube-api-access-sgr5q\") pod \"marketplace-operator-79b997595-zhpf5\" (UID: \"c0bf8d9e-d6fb-400d-8fa2-d547a9a64107\") " pod="openshift-marketplace/marketplace-operator-79b997595-zhpf5" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.694730 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-zhpf5" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.836621 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-76mnr" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.840940 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-x68z7" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.853389 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-k884h" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.867535 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cd9cq" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.873145 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9979w" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.899442 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zhpf5"] Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.960238 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fkrjz\" (UniqueName: \"kubernetes.io/projected/4637ec55-c9ee-48a4-9351-6a382efe4c91-kube-api-access-fkrjz\") pod \"4637ec55-c9ee-48a4-9351-6a382efe4c91\" (UID: \"4637ec55-c9ee-48a4-9351-6a382efe4c91\") " Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.960559 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aab10f23-6223-4554-9a20-3669e7e0eb72-utilities\") pod \"aab10f23-6223-4554-9a20-3669e7e0eb72\" (UID: \"aab10f23-6223-4554-9a20-3669e7e0eb72\") " Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.960578 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7514bf7-ccdc-42f0-a159-78d12f91e55c-utilities\") pod \"e7514bf7-ccdc-42f0-a159-78d12f91e55c\" (UID: \"e7514bf7-ccdc-42f0-a159-78d12f91e55c\") " Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.960642 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aab10f23-6223-4554-9a20-3669e7e0eb72-catalog-content\") pod \"aab10f23-6223-4554-9a20-3669e7e0eb72\" (UID: \"aab10f23-6223-4554-9a20-3669e7e0eb72\") " Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.960745 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4ph79\" (UniqueName: \"kubernetes.io/projected/e7514bf7-ccdc-42f0-a159-78d12f91e55c-kube-api-access-4ph79\") pod \"e7514bf7-ccdc-42f0-a159-78d12f91e55c\" (UID: \"e7514bf7-ccdc-42f0-a159-78d12f91e55c\") " Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.960810 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x72vn\" (UniqueName: \"kubernetes.io/projected/af4ec023-8129-4c1b-99c4-20e814084d4a-kube-api-access-x72vn\") pod \"af4ec023-8129-4c1b-99c4-20e814084d4a\" (UID: \"af4ec023-8129-4c1b-99c4-20e814084d4a\") " Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.960838 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9q8wx\" (UniqueName: \"kubernetes.io/projected/37739944-4511-4fe7-95df-09d42974532e-kube-api-access-9q8wx\") pod \"37739944-4511-4fe7-95df-09d42974532e\" (UID: \"37739944-4511-4fe7-95df-09d42974532e\") " Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.960868 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7514bf7-ccdc-42f0-a159-78d12f91e55c-catalog-content\") pod \"e7514bf7-ccdc-42f0-a159-78d12f91e55c\" (UID: \"e7514bf7-ccdc-42f0-a159-78d12f91e55c\") " Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.960901 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37739944-4511-4fe7-95df-09d42974532e-utilities\") pod \"37739944-4511-4fe7-95df-09d42974532e\" (UID: \"37739944-4511-4fe7-95df-09d42974532e\") " Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.960923 5039 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af4ec023-8129-4c1b-99c4-20e814084d4a-utilities\") pod \"af4ec023-8129-4c1b-99c4-20e814084d4a\" (UID: \"af4ec023-8129-4c1b-99c4-20e814084d4a\") " Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.960945 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4637ec55-c9ee-48a4-9351-6a382efe4c91-marketplace-trusted-ca\") pod \"4637ec55-c9ee-48a4-9351-6a382efe4c91\" (UID: \"4637ec55-c9ee-48a4-9351-6a382efe4c91\") " Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.960969 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4637ec55-c9ee-48a4-9351-6a382efe4c91-marketplace-operator-metrics\") pod \"4637ec55-c9ee-48a4-9351-6a382efe4c91\" (UID: \"4637ec55-c9ee-48a4-9351-6a382efe4c91\") " Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.960990 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rqv77\" (UniqueName: \"kubernetes.io/projected/aab10f23-6223-4554-9a20-3669e7e0eb72-kube-api-access-rqv77\") pod \"aab10f23-6223-4554-9a20-3669e7e0eb72\" (UID: \"aab10f23-6223-4554-9a20-3669e7e0eb72\") " Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.961021 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37739944-4511-4fe7-95df-09d42974532e-catalog-content\") pod \"37739944-4511-4fe7-95df-09d42974532e\" (UID: \"37739944-4511-4fe7-95df-09d42974532e\") " Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.961038 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af4ec023-8129-4c1b-99c4-20e814084d4a-catalog-content\") pod \"af4ec023-8129-4c1b-99c4-20e814084d4a\" (UID: \"af4ec023-8129-4c1b-99c4-20e814084d4a\") " Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.961667 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aab10f23-6223-4554-9a20-3669e7e0eb72-utilities" (OuterVolumeSpecName: "utilities") pod "aab10f23-6223-4554-9a20-3669e7e0eb72" (UID: "aab10f23-6223-4554-9a20-3669e7e0eb72"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.962544 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af4ec023-8129-4c1b-99c4-20e814084d4a-utilities" (OuterVolumeSpecName: "utilities") pod "af4ec023-8129-4c1b-99c4-20e814084d4a" (UID: "af4ec023-8129-4c1b-99c4-20e814084d4a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.962544 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37739944-4511-4fe7-95df-09d42974532e-utilities" (OuterVolumeSpecName: "utilities") pod "37739944-4511-4fe7-95df-09d42974532e" (UID: "37739944-4511-4fe7-95df-09d42974532e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.963231 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4637ec55-c9ee-48a4-9351-6a382efe4c91-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "4637ec55-c9ee-48a4-9351-6a382efe4c91" (UID: "4637ec55-c9ee-48a4-9351-6a382efe4c91"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.964028 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e7514bf7-ccdc-42f0-a159-78d12f91e55c-utilities" (OuterVolumeSpecName: "utilities") pod "e7514bf7-ccdc-42f0-a159-78d12f91e55c" (UID: "e7514bf7-ccdc-42f0-a159-78d12f91e55c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.965696 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37739944-4511-4fe7-95df-09d42974532e-kube-api-access-9q8wx" (OuterVolumeSpecName: "kube-api-access-9q8wx") pod "37739944-4511-4fe7-95df-09d42974532e" (UID: "37739944-4511-4fe7-95df-09d42974532e"). InnerVolumeSpecName "kube-api-access-9q8wx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.966272 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4637ec55-c9ee-48a4-9351-6a382efe4c91-kube-api-access-fkrjz" (OuterVolumeSpecName: "kube-api-access-fkrjz") pod "4637ec55-c9ee-48a4-9351-6a382efe4c91" (UID: "4637ec55-c9ee-48a4-9351-6a382efe4c91"). InnerVolumeSpecName "kube-api-access-fkrjz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.966541 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aab10f23-6223-4554-9a20-3669e7e0eb72-kube-api-access-rqv77" (OuterVolumeSpecName: "kube-api-access-rqv77") pod "aab10f23-6223-4554-9a20-3669e7e0eb72" (UID: "aab10f23-6223-4554-9a20-3669e7e0eb72"). InnerVolumeSpecName "kube-api-access-rqv77". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.966719 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4637ec55-c9ee-48a4-9351-6a382efe4c91-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "4637ec55-c9ee-48a4-9351-6a382efe4c91" (UID: "4637ec55-c9ee-48a4-9351-6a382efe4c91"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.968434 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af4ec023-8129-4c1b-99c4-20e814084d4a-kube-api-access-x72vn" (OuterVolumeSpecName: "kube-api-access-x72vn") pod "af4ec023-8129-4c1b-99c4-20e814084d4a" (UID: "af4ec023-8129-4c1b-99c4-20e814084d4a"). InnerVolumeSpecName "kube-api-access-x72vn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:22:28 crc kubenswrapper[5039]: I1124 13:22:28.971997 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7514bf7-ccdc-42f0-a159-78d12f91e55c-kube-api-access-4ph79" (OuterVolumeSpecName: "kube-api-access-4ph79") pod "e7514bf7-ccdc-42f0-a159-78d12f91e55c" (UID: "e7514bf7-ccdc-42f0-a159-78d12f91e55c"). InnerVolumeSpecName "kube-api-access-4ph79". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.002267 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37739944-4511-4fe7-95df-09d42974532e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "37739944-4511-4fe7-95df-09d42974532e" (UID: "37739944-4511-4fe7-95df-09d42974532e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.025648 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e7514bf7-ccdc-42f0-a159-78d12f91e55c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e7514bf7-ccdc-42f0-a159-78d12f91e55c" (UID: "e7514bf7-ccdc-42f0-a159-78d12f91e55c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.034877 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aab10f23-6223-4554-9a20-3669e7e0eb72-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "aab10f23-6223-4554-9a20-3669e7e0eb72" (UID: "aab10f23-6223-4554-9a20-3669e7e0eb72"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.048367 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af4ec023-8129-4c1b-99c4-20e814084d4a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "af4ec023-8129-4c1b-99c4-20e814084d4a" (UID: "af4ec023-8129-4c1b-99c4-20e814084d4a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.062086 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37739944-4511-4fe7-95df-09d42974532e-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.062124 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af4ec023-8129-4c1b-99c4-20e814084d4a-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.062138 5039 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4637ec55-c9ee-48a4-9351-6a382efe4c91-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.062149 5039 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4637ec55-c9ee-48a4-9351-6a382efe4c91-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.062161 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rqv77\" (UniqueName: \"kubernetes.io/projected/aab10f23-6223-4554-9a20-3669e7e0eb72-kube-api-access-rqv77\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.062171 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37739944-4511-4fe7-95df-09d42974532e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.062178 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af4ec023-8129-4c1b-99c4-20e814084d4a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.062187 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fkrjz\" (UniqueName: \"kubernetes.io/projected/4637ec55-c9ee-48a4-9351-6a382efe4c91-kube-api-access-fkrjz\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.062197 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aab10f23-6223-4554-9a20-3669e7e0eb72-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.062206 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7514bf7-ccdc-42f0-a159-78d12f91e55c-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.062215 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aab10f23-6223-4554-9a20-3669e7e0eb72-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.062224 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4ph79\" (UniqueName: \"kubernetes.io/projected/e7514bf7-ccdc-42f0-a159-78d12f91e55c-kube-api-access-4ph79\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.062236 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x72vn\" (UniqueName: 
\"kubernetes.io/projected/af4ec023-8129-4c1b-99c4-20e814084d4a-kube-api-access-x72vn\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.062244 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9q8wx\" (UniqueName: \"kubernetes.io/projected/37739944-4511-4fe7-95df-09d42974532e-kube-api-access-9q8wx\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.062252 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7514bf7-ccdc-42f0-a159-78d12f91e55c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.267841 5039 generic.go:334] "Generic (PLEG): container finished" podID="4637ec55-c9ee-48a4-9351-6a382efe4c91" containerID="d2b067a7ef2146dd02670527143ed669da4760a9b86427a5bbc9b9dc03bbae94" exitCode=0 Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.267909 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-k884h" event={"ID":"4637ec55-c9ee-48a4-9351-6a382efe4c91","Type":"ContainerDied","Data":"d2b067a7ef2146dd02670527143ed669da4760a9b86427a5bbc9b9dc03bbae94"} Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.267933 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-k884h" event={"ID":"4637ec55-c9ee-48a4-9351-6a382efe4c91","Type":"ContainerDied","Data":"a8e527a352d3f4ccc9546b1e0cd20b482c28dfc99d24f977ed91a88211ad9e22"} Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.267961 5039 scope.go:117] "RemoveContainer" containerID="d2b067a7ef2146dd02670527143ed669da4760a9b86427a5bbc9b9dc03bbae94" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.269387 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-k884h" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.269890 5039 generic.go:334] "Generic (PLEG): container finished" podID="37739944-4511-4fe7-95df-09d42974532e" containerID="c81b97a4494d2c4b7570562579a70ede5220900d52bbcfcfe993736d2baf701f" exitCode=0 Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.269933 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cd9cq" event={"ID":"37739944-4511-4fe7-95df-09d42974532e","Type":"ContainerDied","Data":"c81b97a4494d2c4b7570562579a70ede5220900d52bbcfcfe993736d2baf701f"} Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.269949 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cd9cq" event={"ID":"37739944-4511-4fe7-95df-09d42974532e","Type":"ContainerDied","Data":"69d11a45ef96500b26081bc989c146bf0de76ea6780cf430d1a5e5bc8d20f8d8"} Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.270003 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cd9cq" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.272233 5039 generic.go:334] "Generic (PLEG): container finished" podID="af4ec023-8129-4c1b-99c4-20e814084d4a" containerID="c91399e795dbd979f837648d3b9f284da98760a6f31c0e4153dd7670e38606a9" exitCode=0 Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.272280 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x68z7" event={"ID":"af4ec023-8129-4c1b-99c4-20e814084d4a","Type":"ContainerDied","Data":"c91399e795dbd979f837648d3b9f284da98760a6f31c0e4153dd7670e38606a9"} Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.272295 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x68z7" event={"ID":"af4ec023-8129-4c1b-99c4-20e814084d4a","Type":"ContainerDied","Data":"1da2333063fd57ae02561ff6a88323d25bc97078ec87725424d56ee848fdb9d3"} Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.272298 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-x68z7" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.275761 5039 generic.go:334] "Generic (PLEG): container finished" podID="e7514bf7-ccdc-42f0-a159-78d12f91e55c" containerID="9cb540a7b572d6ee3e91724abbf04fec807098d0ce229c1d34c9af5a7494e081" exitCode=0 Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.275804 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-76mnr" event={"ID":"e7514bf7-ccdc-42f0-a159-78d12f91e55c","Type":"ContainerDied","Data":"9cb540a7b572d6ee3e91724abbf04fec807098d0ce229c1d34c9af5a7494e081"} Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.275821 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-76mnr" event={"ID":"e7514bf7-ccdc-42f0-a159-78d12f91e55c","Type":"ContainerDied","Data":"a74172e25633ec87467e3ee86841d0d32b9178d6940c4de9931da9cdf2af68ba"} Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.275866 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-76mnr" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.280079 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-zhpf5" event={"ID":"c0bf8d9e-d6fb-400d-8fa2-d547a9a64107","Type":"ContainerStarted","Data":"163fe4b1260e3b613a34b3493a145bb88601643324e93799ae938489dfb53f49"} Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.280150 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-zhpf5" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.280164 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-zhpf5" event={"ID":"c0bf8d9e-d6fb-400d-8fa2-d547a9a64107","Type":"ContainerStarted","Data":"207f785d03da16495a5cf989642ad5c0c2a56fddc566f41bab2c5ca6a626fd0f"} Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.282097 5039 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-zhpf5 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.56:8080/healthz\": dial tcp 10.217.0.56:8080: connect: connection refused" start-of-body= Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.282210 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-zhpf5" podUID="c0bf8d9e-d6fb-400d-8fa2-d547a9a64107" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.56:8080/healthz\": dial tcp 10.217.0.56:8080: connect: connection refused" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.283221 5039 generic.go:334] "Generic (PLEG): container finished" podID="aab10f23-6223-4554-9a20-3669e7e0eb72" containerID="8be8aaadba9972bbaa2b66e15014a9539cef6ccc900d4df7e4f978e223263e18" exitCode=0 Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.283257 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9979w" event={"ID":"aab10f23-6223-4554-9a20-3669e7e0eb72","Type":"ContainerDied","Data":"8be8aaadba9972bbaa2b66e15014a9539cef6ccc900d4df7e4f978e223263e18"} Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.283281 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9979w" event={"ID":"aab10f23-6223-4554-9a20-3669e7e0eb72","Type":"ContainerDied","Data":"0f7a3ec88cf752e692357c2bbb7d799fbd93d64f6c04a44635a7fb3a222f26e0"} Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.283337 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9979w" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.287334 5039 scope.go:117] "RemoveContainer" containerID="d2b067a7ef2146dd02670527143ed669da4760a9b86427a5bbc9b9dc03bbae94" Nov 24 13:22:29 crc kubenswrapper[5039]: E1124 13:22:29.288440 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2b067a7ef2146dd02670527143ed669da4760a9b86427a5bbc9b9dc03bbae94\": container with ID starting with d2b067a7ef2146dd02670527143ed669da4760a9b86427a5bbc9b9dc03bbae94 not found: ID does not exist" containerID="d2b067a7ef2146dd02670527143ed669da4760a9b86427a5bbc9b9dc03bbae94" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.288486 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2b067a7ef2146dd02670527143ed669da4760a9b86427a5bbc9b9dc03bbae94"} err="failed to get container status \"d2b067a7ef2146dd02670527143ed669da4760a9b86427a5bbc9b9dc03bbae94\": rpc error: code = NotFound desc = could not find container \"d2b067a7ef2146dd02670527143ed669da4760a9b86427a5bbc9b9dc03bbae94\": container with ID starting with d2b067a7ef2146dd02670527143ed669da4760a9b86427a5bbc9b9dc03bbae94 not found: ID does not exist" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.288534 5039 scope.go:117] "RemoveContainer" containerID="c81b97a4494d2c4b7570562579a70ede5220900d52bbcfcfe993736d2baf701f" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.302548 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-zhpf5" podStartSLOduration=1.30253189 podStartE2EDuration="1.30253189s" podCreationTimestamp="2025-11-24 13:22:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:22:29.297826438 +0000 UTC m=+261.736950938" watchObservedRunningTime="2025-11-24 13:22:29.30253189 +0000 UTC m=+261.741656380" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.311454 5039 scope.go:117] "RemoveContainer" containerID="5302180f020c0c586237a2ca41a425bbeb6c1dc998f292bd18c0f11041fd6cdc" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.332118 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-k884h"] Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.338613 5039 scope.go:117] "RemoveContainer" containerID="8c03ec056c99f14b798bdf05f4ff01957a19483b65452e3473afc100f15fe80f" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.344930 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-k884h"] Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.359130 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cd9cq"] Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.368564 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-cd9cq"] Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.372122 5039 scope.go:117] "RemoveContainer" containerID="c81b97a4494d2c4b7570562579a70ede5220900d52bbcfcfe993736d2baf701f" Nov 24 13:22:29 crc kubenswrapper[5039]: E1124 13:22:29.372430 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"c81b97a4494d2c4b7570562579a70ede5220900d52bbcfcfe993736d2baf701f\": container with ID starting with c81b97a4494d2c4b7570562579a70ede5220900d52bbcfcfe993736d2baf701f not found: ID does not exist" containerID="c81b97a4494d2c4b7570562579a70ede5220900d52bbcfcfe993736d2baf701f" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.372463 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c81b97a4494d2c4b7570562579a70ede5220900d52bbcfcfe993736d2baf701f"} err="failed to get container status \"c81b97a4494d2c4b7570562579a70ede5220900d52bbcfcfe993736d2baf701f\": rpc error: code = NotFound desc = could not find container \"c81b97a4494d2c4b7570562579a70ede5220900d52bbcfcfe993736d2baf701f\": container with ID starting with c81b97a4494d2c4b7570562579a70ede5220900d52bbcfcfe993736d2baf701f not found: ID does not exist" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.372484 5039 scope.go:117] "RemoveContainer" containerID="5302180f020c0c586237a2ca41a425bbeb6c1dc998f292bd18c0f11041fd6cdc" Nov 24 13:22:29 crc kubenswrapper[5039]: E1124 13:22:29.372844 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5302180f020c0c586237a2ca41a425bbeb6c1dc998f292bd18c0f11041fd6cdc\": container with ID starting with 5302180f020c0c586237a2ca41a425bbeb6c1dc998f292bd18c0f11041fd6cdc not found: ID does not exist" containerID="5302180f020c0c586237a2ca41a425bbeb6c1dc998f292bd18c0f11041fd6cdc" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.372880 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5302180f020c0c586237a2ca41a425bbeb6c1dc998f292bd18c0f11041fd6cdc"} err="failed to get container status \"5302180f020c0c586237a2ca41a425bbeb6c1dc998f292bd18c0f11041fd6cdc\": rpc error: code = NotFound desc = could not find container \"5302180f020c0c586237a2ca41a425bbeb6c1dc998f292bd18c0f11041fd6cdc\": container with ID starting with 5302180f020c0c586237a2ca41a425bbeb6c1dc998f292bd18c0f11041fd6cdc not found: ID does not exist" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.372907 5039 scope.go:117] "RemoveContainer" containerID="8c03ec056c99f14b798bdf05f4ff01957a19483b65452e3473afc100f15fe80f" Nov 24 13:22:29 crc kubenswrapper[5039]: E1124 13:22:29.373474 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c03ec056c99f14b798bdf05f4ff01957a19483b65452e3473afc100f15fe80f\": container with ID starting with 8c03ec056c99f14b798bdf05f4ff01957a19483b65452e3473afc100f15fe80f not found: ID does not exist" containerID="8c03ec056c99f14b798bdf05f4ff01957a19483b65452e3473afc100f15fe80f" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.373558 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c03ec056c99f14b798bdf05f4ff01957a19483b65452e3473afc100f15fe80f"} err="failed to get container status \"8c03ec056c99f14b798bdf05f4ff01957a19483b65452e3473afc100f15fe80f\": rpc error: code = NotFound desc = could not find container \"8c03ec056c99f14b798bdf05f4ff01957a19483b65452e3473afc100f15fe80f\": container with ID starting with 8c03ec056c99f14b798bdf05f4ff01957a19483b65452e3473afc100f15fe80f not found: ID does not exist" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.373591 5039 scope.go:117] "RemoveContainer" containerID="c91399e795dbd979f837648d3b9f284da98760a6f31c0e4153dd7670e38606a9" Nov 24 13:22:29 crc 
kubenswrapper[5039]: I1124 13:22:29.378412 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-x68z7"] Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.387669 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-x68z7"] Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.392897 5039 scope.go:117] "RemoveContainer" containerID="f81cc69053655aab7cb2e2531f937da9ec1a1da527481788457490ab3bb6bef1" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.392998 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-76mnr"] Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.393025 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-76mnr"] Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.397918 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9979w"] Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.400850 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-9979w"] Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.415194 5039 scope.go:117] "RemoveContainer" containerID="92538f189455c595c39ec33d229091377ae1ac5dad24d5d31bdd208769008513" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.430756 5039 scope.go:117] "RemoveContainer" containerID="c91399e795dbd979f837648d3b9f284da98760a6f31c0e4153dd7670e38606a9" Nov 24 13:22:29 crc kubenswrapper[5039]: E1124 13:22:29.431211 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c91399e795dbd979f837648d3b9f284da98760a6f31c0e4153dd7670e38606a9\": container with ID starting with c91399e795dbd979f837648d3b9f284da98760a6f31c0e4153dd7670e38606a9 not found: ID does not exist" containerID="c91399e795dbd979f837648d3b9f284da98760a6f31c0e4153dd7670e38606a9" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.431244 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c91399e795dbd979f837648d3b9f284da98760a6f31c0e4153dd7670e38606a9"} err="failed to get container status \"c91399e795dbd979f837648d3b9f284da98760a6f31c0e4153dd7670e38606a9\": rpc error: code = NotFound desc = could not find container \"c91399e795dbd979f837648d3b9f284da98760a6f31c0e4153dd7670e38606a9\": container with ID starting with c91399e795dbd979f837648d3b9f284da98760a6f31c0e4153dd7670e38606a9 not found: ID does not exist" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.431284 5039 scope.go:117] "RemoveContainer" containerID="f81cc69053655aab7cb2e2531f937da9ec1a1da527481788457490ab3bb6bef1" Nov 24 13:22:29 crc kubenswrapper[5039]: E1124 13:22:29.431758 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f81cc69053655aab7cb2e2531f937da9ec1a1da527481788457490ab3bb6bef1\": container with ID starting with f81cc69053655aab7cb2e2531f937da9ec1a1da527481788457490ab3bb6bef1 not found: ID does not exist" containerID="f81cc69053655aab7cb2e2531f937da9ec1a1da527481788457490ab3bb6bef1" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.431823 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f81cc69053655aab7cb2e2531f937da9ec1a1da527481788457490ab3bb6bef1"} err="failed to get container status 
\"f81cc69053655aab7cb2e2531f937da9ec1a1da527481788457490ab3bb6bef1\": rpc error: code = NotFound desc = could not find container \"f81cc69053655aab7cb2e2531f937da9ec1a1da527481788457490ab3bb6bef1\": container with ID starting with f81cc69053655aab7cb2e2531f937da9ec1a1da527481788457490ab3bb6bef1 not found: ID does not exist" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.431844 5039 scope.go:117] "RemoveContainer" containerID="92538f189455c595c39ec33d229091377ae1ac5dad24d5d31bdd208769008513" Nov 24 13:22:29 crc kubenswrapper[5039]: E1124 13:22:29.432299 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92538f189455c595c39ec33d229091377ae1ac5dad24d5d31bdd208769008513\": container with ID starting with 92538f189455c595c39ec33d229091377ae1ac5dad24d5d31bdd208769008513 not found: ID does not exist" containerID="92538f189455c595c39ec33d229091377ae1ac5dad24d5d31bdd208769008513" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.432339 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92538f189455c595c39ec33d229091377ae1ac5dad24d5d31bdd208769008513"} err="failed to get container status \"92538f189455c595c39ec33d229091377ae1ac5dad24d5d31bdd208769008513\": rpc error: code = NotFound desc = could not find container \"92538f189455c595c39ec33d229091377ae1ac5dad24d5d31bdd208769008513\": container with ID starting with 92538f189455c595c39ec33d229091377ae1ac5dad24d5d31bdd208769008513 not found: ID does not exist" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.432355 5039 scope.go:117] "RemoveContainer" containerID="9cb540a7b572d6ee3e91724abbf04fec807098d0ce229c1d34c9af5a7494e081" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.443559 5039 scope.go:117] "RemoveContainer" containerID="9b10b2cb0655af0d0cd45825be01fda50aa6350c85721da7166f4cbd89908925" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.456873 5039 scope.go:117] "RemoveContainer" containerID="ba208ecf6e5091a3f157925fa9989cbeec02603e1eddc4baffe0864fb1664480" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.478804 5039 scope.go:117] "RemoveContainer" containerID="9cb540a7b572d6ee3e91724abbf04fec807098d0ce229c1d34c9af5a7494e081" Nov 24 13:22:29 crc kubenswrapper[5039]: E1124 13:22:29.480657 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9cb540a7b572d6ee3e91724abbf04fec807098d0ce229c1d34c9af5a7494e081\": container with ID starting with 9cb540a7b572d6ee3e91724abbf04fec807098d0ce229c1d34c9af5a7494e081 not found: ID does not exist" containerID="9cb540a7b572d6ee3e91724abbf04fec807098d0ce229c1d34c9af5a7494e081" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.480713 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9cb540a7b572d6ee3e91724abbf04fec807098d0ce229c1d34c9af5a7494e081"} err="failed to get container status \"9cb540a7b572d6ee3e91724abbf04fec807098d0ce229c1d34c9af5a7494e081\": rpc error: code = NotFound desc = could not find container \"9cb540a7b572d6ee3e91724abbf04fec807098d0ce229c1d34c9af5a7494e081\": container with ID starting with 9cb540a7b572d6ee3e91724abbf04fec807098d0ce229c1d34c9af5a7494e081 not found: ID does not exist" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.480750 5039 scope.go:117] "RemoveContainer" containerID="9b10b2cb0655af0d0cd45825be01fda50aa6350c85721da7166f4cbd89908925" Nov 24 13:22:29 crc 
kubenswrapper[5039]: E1124 13:22:29.481221 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b10b2cb0655af0d0cd45825be01fda50aa6350c85721da7166f4cbd89908925\": container with ID starting with 9b10b2cb0655af0d0cd45825be01fda50aa6350c85721da7166f4cbd89908925 not found: ID does not exist" containerID="9b10b2cb0655af0d0cd45825be01fda50aa6350c85721da7166f4cbd89908925" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.481253 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b10b2cb0655af0d0cd45825be01fda50aa6350c85721da7166f4cbd89908925"} err="failed to get container status \"9b10b2cb0655af0d0cd45825be01fda50aa6350c85721da7166f4cbd89908925\": rpc error: code = NotFound desc = could not find container \"9b10b2cb0655af0d0cd45825be01fda50aa6350c85721da7166f4cbd89908925\": container with ID starting with 9b10b2cb0655af0d0cd45825be01fda50aa6350c85721da7166f4cbd89908925 not found: ID does not exist" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.481276 5039 scope.go:117] "RemoveContainer" containerID="ba208ecf6e5091a3f157925fa9989cbeec02603e1eddc4baffe0864fb1664480" Nov 24 13:22:29 crc kubenswrapper[5039]: E1124 13:22:29.482483 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba208ecf6e5091a3f157925fa9989cbeec02603e1eddc4baffe0864fb1664480\": container with ID starting with ba208ecf6e5091a3f157925fa9989cbeec02603e1eddc4baffe0864fb1664480 not found: ID does not exist" containerID="ba208ecf6e5091a3f157925fa9989cbeec02603e1eddc4baffe0864fb1664480" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.482553 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba208ecf6e5091a3f157925fa9989cbeec02603e1eddc4baffe0864fb1664480"} err="failed to get container status \"ba208ecf6e5091a3f157925fa9989cbeec02603e1eddc4baffe0864fb1664480\": rpc error: code = NotFound desc = could not find container \"ba208ecf6e5091a3f157925fa9989cbeec02603e1eddc4baffe0864fb1664480\": container with ID starting with ba208ecf6e5091a3f157925fa9989cbeec02603e1eddc4baffe0864fb1664480 not found: ID does not exist" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.482609 5039 scope.go:117] "RemoveContainer" containerID="8be8aaadba9972bbaa2b66e15014a9539cef6ccc900d4df7e4f978e223263e18" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.544158 5039 scope.go:117] "RemoveContainer" containerID="68dbc4b770763d8ce8502f42f569d21bea94bc2df45550b16bf0ea2080c0c0ee" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.560370 5039 scope.go:117] "RemoveContainer" containerID="76eaa4d8419a259653cb282709ab09c84a27c5cc1ade28c3358a7bfd86c6820a" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.583020 5039 scope.go:117] "RemoveContainer" containerID="8be8aaadba9972bbaa2b66e15014a9539cef6ccc900d4df7e4f978e223263e18" Nov 24 13:22:29 crc kubenswrapper[5039]: E1124 13:22:29.583577 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8be8aaadba9972bbaa2b66e15014a9539cef6ccc900d4df7e4f978e223263e18\": container with ID starting with 8be8aaadba9972bbaa2b66e15014a9539cef6ccc900d4df7e4f978e223263e18 not found: ID does not exist" containerID="8be8aaadba9972bbaa2b66e15014a9539cef6ccc900d4df7e4f978e223263e18" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.583619 5039 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8be8aaadba9972bbaa2b66e15014a9539cef6ccc900d4df7e4f978e223263e18"} err="failed to get container status \"8be8aaadba9972bbaa2b66e15014a9539cef6ccc900d4df7e4f978e223263e18\": rpc error: code = NotFound desc = could not find container \"8be8aaadba9972bbaa2b66e15014a9539cef6ccc900d4df7e4f978e223263e18\": container with ID starting with 8be8aaadba9972bbaa2b66e15014a9539cef6ccc900d4df7e4f978e223263e18 not found: ID does not exist" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.583649 5039 scope.go:117] "RemoveContainer" containerID="68dbc4b770763d8ce8502f42f569d21bea94bc2df45550b16bf0ea2080c0c0ee" Nov 24 13:22:29 crc kubenswrapper[5039]: E1124 13:22:29.584125 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68dbc4b770763d8ce8502f42f569d21bea94bc2df45550b16bf0ea2080c0c0ee\": container with ID starting with 68dbc4b770763d8ce8502f42f569d21bea94bc2df45550b16bf0ea2080c0c0ee not found: ID does not exist" containerID="68dbc4b770763d8ce8502f42f569d21bea94bc2df45550b16bf0ea2080c0c0ee" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.584147 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68dbc4b770763d8ce8502f42f569d21bea94bc2df45550b16bf0ea2080c0c0ee"} err="failed to get container status \"68dbc4b770763d8ce8502f42f569d21bea94bc2df45550b16bf0ea2080c0c0ee\": rpc error: code = NotFound desc = could not find container \"68dbc4b770763d8ce8502f42f569d21bea94bc2df45550b16bf0ea2080c0c0ee\": container with ID starting with 68dbc4b770763d8ce8502f42f569d21bea94bc2df45550b16bf0ea2080c0c0ee not found: ID does not exist" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.584159 5039 scope.go:117] "RemoveContainer" containerID="76eaa4d8419a259653cb282709ab09c84a27c5cc1ade28c3358a7bfd86c6820a" Nov 24 13:22:29 crc kubenswrapper[5039]: E1124 13:22:29.584521 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76eaa4d8419a259653cb282709ab09c84a27c5cc1ade28c3358a7bfd86c6820a\": container with ID starting with 76eaa4d8419a259653cb282709ab09c84a27c5cc1ade28c3358a7bfd86c6820a not found: ID does not exist" containerID="76eaa4d8419a259653cb282709ab09c84a27c5cc1ade28c3358a7bfd86c6820a" Nov 24 13:22:29 crc kubenswrapper[5039]: I1124 13:22:29.584557 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76eaa4d8419a259653cb282709ab09c84a27c5cc1ade28c3358a7bfd86c6820a"} err="failed to get container status \"76eaa4d8419a259653cb282709ab09c84a27c5cc1ade28c3358a7bfd86c6820a\": rpc error: code = NotFound desc = could not find container \"76eaa4d8419a259653cb282709ab09c84a27c5cc1ade28c3358a7bfd86c6820a\": container with ID starting with 76eaa4d8419a259653cb282709ab09c84a27c5cc1ade28c3358a7bfd86c6820a not found: ID does not exist" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.296537 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-zhpf5" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.315047 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37739944-4511-4fe7-95df-09d42974532e" path="/var/lib/kubelet/pods/37739944-4511-4fe7-95df-09d42974532e/volumes" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.316116 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="4637ec55-c9ee-48a4-9351-6a382efe4c91" path="/var/lib/kubelet/pods/4637ec55-c9ee-48a4-9351-6a382efe4c91/volumes" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.316676 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aab10f23-6223-4554-9a20-3669e7e0eb72" path="/var/lib/kubelet/pods/aab10f23-6223-4554-9a20-3669e7e0eb72/volumes" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.317883 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af4ec023-8129-4c1b-99c4-20e814084d4a" path="/var/lib/kubelet/pods/af4ec023-8129-4c1b-99c4-20e814084d4a/volumes" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.319412 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7514bf7-ccdc-42f0-a159-78d12f91e55c" path="/var/lib/kubelet/pods/e7514bf7-ccdc-42f0-a159-78d12f91e55c/volumes" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.548079 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-c92b4"] Nov 24 13:22:30 crc kubenswrapper[5039]: E1124 13:22:30.548255 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aab10f23-6223-4554-9a20-3669e7e0eb72" containerName="extract-utilities" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.548266 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="aab10f23-6223-4554-9a20-3669e7e0eb72" containerName="extract-utilities" Nov 24 13:22:30 crc kubenswrapper[5039]: E1124 13:22:30.548277 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af4ec023-8129-4c1b-99c4-20e814084d4a" containerName="registry-server" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.548375 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="af4ec023-8129-4c1b-99c4-20e814084d4a" containerName="registry-server" Nov 24 13:22:30 crc kubenswrapper[5039]: E1124 13:22:30.548391 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af4ec023-8129-4c1b-99c4-20e814084d4a" containerName="extract-utilities" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.548397 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="af4ec023-8129-4c1b-99c4-20e814084d4a" containerName="extract-utilities" Nov 24 13:22:30 crc kubenswrapper[5039]: E1124 13:22:30.548404 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7514bf7-ccdc-42f0-a159-78d12f91e55c" containerName="extract-content" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.548409 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7514bf7-ccdc-42f0-a159-78d12f91e55c" containerName="extract-content" Nov 24 13:22:30 crc kubenswrapper[5039]: E1124 13:22:30.548416 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37739944-4511-4fe7-95df-09d42974532e" containerName="extract-utilities" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.548428 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="37739944-4511-4fe7-95df-09d42974532e" containerName="extract-utilities" Nov 24 13:22:30 crc kubenswrapper[5039]: E1124 13:22:30.548436 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37739944-4511-4fe7-95df-09d42974532e" containerName="registry-server" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.548445 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="37739944-4511-4fe7-95df-09d42974532e" containerName="registry-server" Nov 24 13:22:30 crc kubenswrapper[5039]: E1124 13:22:30.548453 5039 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="af4ec023-8129-4c1b-99c4-20e814084d4a" containerName="extract-content" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.548459 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="af4ec023-8129-4c1b-99c4-20e814084d4a" containerName="extract-content" Nov 24 13:22:30 crc kubenswrapper[5039]: E1124 13:22:30.548465 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7514bf7-ccdc-42f0-a159-78d12f91e55c" containerName="registry-server" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.548470 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7514bf7-ccdc-42f0-a159-78d12f91e55c" containerName="registry-server" Nov 24 13:22:30 crc kubenswrapper[5039]: E1124 13:22:30.548478 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4637ec55-c9ee-48a4-9351-6a382efe4c91" containerName="marketplace-operator" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.548484 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="4637ec55-c9ee-48a4-9351-6a382efe4c91" containerName="marketplace-operator" Nov 24 13:22:30 crc kubenswrapper[5039]: E1124 13:22:30.548492 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7514bf7-ccdc-42f0-a159-78d12f91e55c" containerName="extract-utilities" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.548510 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7514bf7-ccdc-42f0-a159-78d12f91e55c" containerName="extract-utilities" Nov 24 13:22:30 crc kubenswrapper[5039]: E1124 13:22:30.548517 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aab10f23-6223-4554-9a20-3669e7e0eb72" containerName="extract-content" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.548522 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="aab10f23-6223-4554-9a20-3669e7e0eb72" containerName="extract-content" Nov 24 13:22:30 crc kubenswrapper[5039]: E1124 13:22:30.548531 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37739944-4511-4fe7-95df-09d42974532e" containerName="extract-content" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.548536 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="37739944-4511-4fe7-95df-09d42974532e" containerName="extract-content" Nov 24 13:22:30 crc kubenswrapper[5039]: E1124 13:22:30.548543 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aab10f23-6223-4554-9a20-3669e7e0eb72" containerName="registry-server" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.548549 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="aab10f23-6223-4554-9a20-3669e7e0eb72" containerName="registry-server" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.548632 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="4637ec55-c9ee-48a4-9351-6a382efe4c91" containerName="marketplace-operator" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.548645 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="aab10f23-6223-4554-9a20-3669e7e0eb72" containerName="registry-server" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.548651 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="af4ec023-8129-4c1b-99c4-20e814084d4a" containerName="registry-server" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.548660 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="37739944-4511-4fe7-95df-09d42974532e" containerName="registry-server" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 
13:22:30.548671 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7514bf7-ccdc-42f0-a159-78d12f91e55c" containerName="registry-server" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.549271 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c92b4" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.550891 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.557322 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-c92b4"] Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.580257 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91b2d0f6-ea36-4860-aa8a-2645a1a44741-catalog-content\") pod \"redhat-marketplace-c92b4\" (UID: \"91b2d0f6-ea36-4860-aa8a-2645a1a44741\") " pod="openshift-marketplace/redhat-marketplace-c92b4" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.580321 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ng8dj\" (UniqueName: \"kubernetes.io/projected/91b2d0f6-ea36-4860-aa8a-2645a1a44741-kube-api-access-ng8dj\") pod \"redhat-marketplace-c92b4\" (UID: \"91b2d0f6-ea36-4860-aa8a-2645a1a44741\") " pod="openshift-marketplace/redhat-marketplace-c92b4" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.580436 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91b2d0f6-ea36-4860-aa8a-2645a1a44741-utilities\") pod \"redhat-marketplace-c92b4\" (UID: \"91b2d0f6-ea36-4860-aa8a-2645a1a44741\") " pod="openshift-marketplace/redhat-marketplace-c92b4" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.681318 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91b2d0f6-ea36-4860-aa8a-2645a1a44741-utilities\") pod \"redhat-marketplace-c92b4\" (UID: \"91b2d0f6-ea36-4860-aa8a-2645a1a44741\") " pod="openshift-marketplace/redhat-marketplace-c92b4" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.681390 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91b2d0f6-ea36-4860-aa8a-2645a1a44741-catalog-content\") pod \"redhat-marketplace-c92b4\" (UID: \"91b2d0f6-ea36-4860-aa8a-2645a1a44741\") " pod="openshift-marketplace/redhat-marketplace-c92b4" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.681464 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ng8dj\" (UniqueName: \"kubernetes.io/projected/91b2d0f6-ea36-4860-aa8a-2645a1a44741-kube-api-access-ng8dj\") pod \"redhat-marketplace-c92b4\" (UID: \"91b2d0f6-ea36-4860-aa8a-2645a1a44741\") " pod="openshift-marketplace/redhat-marketplace-c92b4" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.681853 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91b2d0f6-ea36-4860-aa8a-2645a1a44741-utilities\") pod \"redhat-marketplace-c92b4\" (UID: \"91b2d0f6-ea36-4860-aa8a-2645a1a44741\") " pod="openshift-marketplace/redhat-marketplace-c92b4" Nov 24 13:22:30 crc kubenswrapper[5039]: 
I1124 13:22:30.681903 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91b2d0f6-ea36-4860-aa8a-2645a1a44741-catalog-content\") pod \"redhat-marketplace-c92b4\" (UID: \"91b2d0f6-ea36-4860-aa8a-2645a1a44741\") " pod="openshift-marketplace/redhat-marketplace-c92b4" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.707188 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ng8dj\" (UniqueName: \"kubernetes.io/projected/91b2d0f6-ea36-4860-aa8a-2645a1a44741-kube-api-access-ng8dj\") pod \"redhat-marketplace-c92b4\" (UID: \"91b2d0f6-ea36-4860-aa8a-2645a1a44741\") " pod="openshift-marketplace/redhat-marketplace-c92b4" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.743692 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-kf4pl"] Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.744792 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kf4pl" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.747956 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.756893 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kf4pl"] Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.865207 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c92b4" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.883428 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d398306-3543-4f30-90a9-1f39fd5c58e4-catalog-content\") pod \"redhat-operators-kf4pl\" (UID: \"4d398306-3543-4f30-90a9-1f39fd5c58e4\") " pod="openshift-marketplace/redhat-operators-kf4pl" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.883516 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d398306-3543-4f30-90a9-1f39fd5c58e4-utilities\") pod \"redhat-operators-kf4pl\" (UID: \"4d398306-3543-4f30-90a9-1f39fd5c58e4\") " pod="openshift-marketplace/redhat-operators-kf4pl" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.883676 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtmqt\" (UniqueName: \"kubernetes.io/projected/4d398306-3543-4f30-90a9-1f39fd5c58e4-kube-api-access-gtmqt\") pod \"redhat-operators-kf4pl\" (UID: \"4d398306-3543-4f30-90a9-1f39fd5c58e4\") " pod="openshift-marketplace/redhat-operators-kf4pl" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.985602 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtmqt\" (UniqueName: \"kubernetes.io/projected/4d398306-3543-4f30-90a9-1f39fd5c58e4-kube-api-access-gtmqt\") pod \"redhat-operators-kf4pl\" (UID: \"4d398306-3543-4f30-90a9-1f39fd5c58e4\") " pod="openshift-marketplace/redhat-operators-kf4pl" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.985744 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d398306-3543-4f30-90a9-1f39fd5c58e4-catalog-content\") pod 
\"redhat-operators-kf4pl\" (UID: \"4d398306-3543-4f30-90a9-1f39fd5c58e4\") " pod="openshift-marketplace/redhat-operators-kf4pl" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.985778 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d398306-3543-4f30-90a9-1f39fd5c58e4-utilities\") pod \"redhat-operators-kf4pl\" (UID: \"4d398306-3543-4f30-90a9-1f39fd5c58e4\") " pod="openshift-marketplace/redhat-operators-kf4pl" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.986135 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d398306-3543-4f30-90a9-1f39fd5c58e4-utilities\") pod \"redhat-operators-kf4pl\" (UID: \"4d398306-3543-4f30-90a9-1f39fd5c58e4\") " pod="openshift-marketplace/redhat-operators-kf4pl" Nov 24 13:22:30 crc kubenswrapper[5039]: I1124 13:22:30.986203 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d398306-3543-4f30-90a9-1f39fd5c58e4-catalog-content\") pod \"redhat-operators-kf4pl\" (UID: \"4d398306-3543-4f30-90a9-1f39fd5c58e4\") " pod="openshift-marketplace/redhat-operators-kf4pl" Nov 24 13:22:31 crc kubenswrapper[5039]: I1124 13:22:31.003297 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtmqt\" (UniqueName: \"kubernetes.io/projected/4d398306-3543-4f30-90a9-1f39fd5c58e4-kube-api-access-gtmqt\") pod \"redhat-operators-kf4pl\" (UID: \"4d398306-3543-4f30-90a9-1f39fd5c58e4\") " pod="openshift-marketplace/redhat-operators-kf4pl" Nov 24 13:22:31 crc kubenswrapper[5039]: I1124 13:22:31.067967 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-c92b4"] Nov 24 13:22:31 crc kubenswrapper[5039]: W1124 13:22:31.074788 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91b2d0f6_ea36_4860_aa8a_2645a1a44741.slice/crio-00e82d95bd9c4198f160e617458adf388310db6f698022853e1cb3da451d8fce WatchSource:0}: Error finding container 00e82d95bd9c4198f160e617458adf388310db6f698022853e1cb3da451d8fce: Status 404 returned error can't find the container with id 00e82d95bd9c4198f160e617458adf388310db6f698022853e1cb3da451d8fce Nov 24 13:22:31 crc kubenswrapper[5039]: I1124 13:22:31.075065 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kf4pl" Nov 24 13:22:31 crc kubenswrapper[5039]: I1124 13:22:31.255027 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kf4pl"] Nov 24 13:22:31 crc kubenswrapper[5039]: I1124 13:22:31.316992 5039 generic.go:334] "Generic (PLEG): container finished" podID="91b2d0f6-ea36-4860-aa8a-2645a1a44741" containerID="b5c3b6762f41a3d27c0fb0907f1429be3eda6369f29c7aa6c24a6b9b38359ae0" exitCode=0 Nov 24 13:22:31 crc kubenswrapper[5039]: I1124 13:22:31.317069 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c92b4" event={"ID":"91b2d0f6-ea36-4860-aa8a-2645a1a44741","Type":"ContainerDied","Data":"b5c3b6762f41a3d27c0fb0907f1429be3eda6369f29c7aa6c24a6b9b38359ae0"} Nov 24 13:22:31 crc kubenswrapper[5039]: I1124 13:22:31.317158 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c92b4" event={"ID":"91b2d0f6-ea36-4860-aa8a-2645a1a44741","Type":"ContainerStarted","Data":"00e82d95bd9c4198f160e617458adf388310db6f698022853e1cb3da451d8fce"} Nov 24 13:22:32 crc kubenswrapper[5039]: I1124 13:22:32.321530 5039 generic.go:334] "Generic (PLEG): container finished" podID="4d398306-3543-4f30-90a9-1f39fd5c58e4" containerID="6d70e3628a71e0e873173b796fe1badfae50972547e093e3f2dc136a49dfa41a" exitCode=0 Nov 24 13:22:32 crc kubenswrapper[5039]: I1124 13:22:32.321568 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kf4pl" event={"ID":"4d398306-3543-4f30-90a9-1f39fd5c58e4","Type":"ContainerDied","Data":"6d70e3628a71e0e873173b796fe1badfae50972547e093e3f2dc136a49dfa41a"} Nov 24 13:22:32 crc kubenswrapper[5039]: I1124 13:22:32.321936 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kf4pl" event={"ID":"4d398306-3543-4f30-90a9-1f39fd5c58e4","Type":"ContainerStarted","Data":"258d825aab874221699eccf2a17116ed99e5692a23d6809f9662c346082fa174"} Nov 24 13:22:32 crc kubenswrapper[5039]: I1124 13:22:32.325701 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c92b4" event={"ID":"91b2d0f6-ea36-4860-aa8a-2645a1a44741","Type":"ContainerStarted","Data":"9d55e7e0398d62812c0e234352fbfa41565520c66252ab57e90fa57298616e13"} Nov 24 13:22:32 crc kubenswrapper[5039]: I1124 13:22:32.946956 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-nqqfb"] Nov 24 13:22:32 crc kubenswrapper[5039]: I1124 13:22:32.949019 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nqqfb" Nov 24 13:22:32 crc kubenswrapper[5039]: I1124 13:22:32.953291 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 24 13:22:32 crc kubenswrapper[5039]: I1124 13:22:32.959949 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nqqfb"] Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.007644 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b19d8ea-3c77-43f4-b236-9d5ee2edbafd-utilities\") pod \"certified-operators-nqqfb\" (UID: \"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd\") " pod="openshift-marketplace/certified-operators-nqqfb" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.007685 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9m7p\" (UniqueName: \"kubernetes.io/projected/4b19d8ea-3c77-43f4-b236-9d5ee2edbafd-kube-api-access-d9m7p\") pod \"certified-operators-nqqfb\" (UID: \"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd\") " pod="openshift-marketplace/certified-operators-nqqfb" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.007779 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b19d8ea-3c77-43f4-b236-9d5ee2edbafd-catalog-content\") pod \"certified-operators-nqqfb\" (UID: \"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd\") " pod="openshift-marketplace/certified-operators-nqqfb" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.108746 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b19d8ea-3c77-43f4-b236-9d5ee2edbafd-catalog-content\") pod \"certified-operators-nqqfb\" (UID: \"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd\") " pod="openshift-marketplace/certified-operators-nqqfb" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.108813 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b19d8ea-3c77-43f4-b236-9d5ee2edbafd-utilities\") pod \"certified-operators-nqqfb\" (UID: \"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd\") " pod="openshift-marketplace/certified-operators-nqqfb" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.108842 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9m7p\" (UniqueName: \"kubernetes.io/projected/4b19d8ea-3c77-43f4-b236-9d5ee2edbafd-kube-api-access-d9m7p\") pod \"certified-operators-nqqfb\" (UID: \"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd\") " pod="openshift-marketplace/certified-operators-nqqfb" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.109318 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b19d8ea-3c77-43f4-b236-9d5ee2edbafd-utilities\") pod \"certified-operators-nqqfb\" (UID: \"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd\") " pod="openshift-marketplace/certified-operators-nqqfb" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.109318 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b19d8ea-3c77-43f4-b236-9d5ee2edbafd-catalog-content\") pod \"certified-operators-nqqfb\" (UID: 
\"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd\") " pod="openshift-marketplace/certified-operators-nqqfb" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.127749 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9m7p\" (UniqueName: \"kubernetes.io/projected/4b19d8ea-3c77-43f4-b236-9d5ee2edbafd-kube-api-access-d9m7p\") pod \"certified-operators-nqqfb\" (UID: \"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd\") " pod="openshift-marketplace/certified-operators-nqqfb" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.144930 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qr7w8"] Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.146126 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qr7w8" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.148338 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.155197 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qr7w8"] Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.268316 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nqqfb" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.311490 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gwkrq\" (UniqueName: \"kubernetes.io/projected/f3688350-42c7-4e8e-886d-e4d3c718221f-kube-api-access-gwkrq\") pod \"community-operators-qr7w8\" (UID: \"f3688350-42c7-4e8e-886d-e4d3c718221f\") " pod="openshift-marketplace/community-operators-qr7w8" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.314806 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3688350-42c7-4e8e-886d-e4d3c718221f-catalog-content\") pod \"community-operators-qr7w8\" (UID: \"f3688350-42c7-4e8e-886d-e4d3c718221f\") " pod="openshift-marketplace/community-operators-qr7w8" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.314893 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3688350-42c7-4e8e-886d-e4d3c718221f-utilities\") pod \"community-operators-qr7w8\" (UID: \"f3688350-42c7-4e8e-886d-e4d3c718221f\") " pod="openshift-marketplace/community-operators-qr7w8" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.334936 5039 generic.go:334] "Generic (PLEG): container finished" podID="91b2d0f6-ea36-4860-aa8a-2645a1a44741" containerID="9d55e7e0398d62812c0e234352fbfa41565520c66252ab57e90fa57298616e13" exitCode=0 Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.335049 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c92b4" event={"ID":"91b2d0f6-ea36-4860-aa8a-2645a1a44741","Type":"ContainerDied","Data":"9d55e7e0398d62812c0e234352fbfa41565520c66252ab57e90fa57298616e13"} Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.343700 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kf4pl" 
event={"ID":"4d398306-3543-4f30-90a9-1f39fd5c58e4","Type":"ContainerStarted","Data":"5cce04a09b2f9520dc740cfbd6abe92716ee6394ff8322389aeb73a316dc9fbc"} Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.416693 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3688350-42c7-4e8e-886d-e4d3c718221f-catalog-content\") pod \"community-operators-qr7w8\" (UID: \"f3688350-42c7-4e8e-886d-e4d3c718221f\") " pod="openshift-marketplace/community-operators-qr7w8" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.416737 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3688350-42c7-4e8e-886d-e4d3c718221f-utilities\") pod \"community-operators-qr7w8\" (UID: \"f3688350-42c7-4e8e-886d-e4d3c718221f\") " pod="openshift-marketplace/community-operators-qr7w8" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.416800 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gwkrq\" (UniqueName: \"kubernetes.io/projected/f3688350-42c7-4e8e-886d-e4d3c718221f-kube-api-access-gwkrq\") pod \"community-operators-qr7w8\" (UID: \"f3688350-42c7-4e8e-886d-e4d3c718221f\") " pod="openshift-marketplace/community-operators-qr7w8" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.417189 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3688350-42c7-4e8e-886d-e4d3c718221f-catalog-content\") pod \"community-operators-qr7w8\" (UID: \"f3688350-42c7-4e8e-886d-e4d3c718221f\") " pod="openshift-marketplace/community-operators-qr7w8" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.417351 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3688350-42c7-4e8e-886d-e4d3c718221f-utilities\") pod \"community-operators-qr7w8\" (UID: \"f3688350-42c7-4e8e-886d-e4d3c718221f\") " pod="openshift-marketplace/community-operators-qr7w8" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.437103 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gwkrq\" (UniqueName: \"kubernetes.io/projected/f3688350-42c7-4e8e-886d-e4d3c718221f-kube-api-access-gwkrq\") pod \"community-operators-qr7w8\" (UID: \"f3688350-42c7-4e8e-886d-e4d3c718221f\") " pod="openshift-marketplace/community-operators-qr7w8" Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.495581 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nqqfb"] Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.501711 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qr7w8" Nov 24 13:22:33 crc kubenswrapper[5039]: W1124 13:22:33.510126 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4b19d8ea_3c77_43f4_b236_9d5ee2edbafd.slice/crio-5417fa8cd7d0447347030496184abe980c3baf292fbb883dfabb943cf842e288 WatchSource:0}: Error finding container 5417fa8cd7d0447347030496184abe980c3baf292fbb883dfabb943cf842e288: Status 404 returned error can't find the container with id 5417fa8cd7d0447347030496184abe980c3baf292fbb883dfabb943cf842e288 Nov 24 13:22:33 crc kubenswrapper[5039]: I1124 13:22:33.674940 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qr7w8"] Nov 24 13:22:33 crc kubenswrapper[5039]: W1124 13:22:33.722101 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf3688350_42c7_4e8e_886d_e4d3c718221f.slice/crio-d129708db0749f5552f3d3b18fc192df85b12a251a425a0b9af606c295e9fc4a WatchSource:0}: Error finding container d129708db0749f5552f3d3b18fc192df85b12a251a425a0b9af606c295e9fc4a: Status 404 returned error can't find the container with id d129708db0749f5552f3d3b18fc192df85b12a251a425a0b9af606c295e9fc4a Nov 24 13:22:34 crc kubenswrapper[5039]: I1124 13:22:34.355983 5039 generic.go:334] "Generic (PLEG): container finished" podID="f3688350-42c7-4e8e-886d-e4d3c718221f" containerID="452390df03b3a575c0a473b41ee9ad4d3ea5952a126283be9bbd4e001c716ec6" exitCode=0 Nov 24 13:22:34 crc kubenswrapper[5039]: I1124 13:22:34.356220 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qr7w8" event={"ID":"f3688350-42c7-4e8e-886d-e4d3c718221f","Type":"ContainerDied","Data":"452390df03b3a575c0a473b41ee9ad4d3ea5952a126283be9bbd4e001c716ec6"} Nov 24 13:22:34 crc kubenswrapper[5039]: I1124 13:22:34.356245 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qr7w8" event={"ID":"f3688350-42c7-4e8e-886d-e4d3c718221f","Type":"ContainerStarted","Data":"d129708db0749f5552f3d3b18fc192df85b12a251a425a0b9af606c295e9fc4a"} Nov 24 13:22:34 crc kubenswrapper[5039]: I1124 13:22:34.359388 5039 generic.go:334] "Generic (PLEG): container finished" podID="4b19d8ea-3c77-43f4-b236-9d5ee2edbafd" containerID="ecae051b4a6c33352fbbcb9aec28c65d69a6321527e6c50425106ddfe6bfda08" exitCode=0 Nov 24 13:22:34 crc kubenswrapper[5039]: I1124 13:22:34.359467 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nqqfb" event={"ID":"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd","Type":"ContainerDied","Data":"ecae051b4a6c33352fbbcb9aec28c65d69a6321527e6c50425106ddfe6bfda08"} Nov 24 13:22:34 crc kubenswrapper[5039]: I1124 13:22:34.359498 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nqqfb" event={"ID":"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd","Type":"ContainerStarted","Data":"5417fa8cd7d0447347030496184abe980c3baf292fbb883dfabb943cf842e288"} Nov 24 13:22:34 crc kubenswrapper[5039]: I1124 13:22:34.362103 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c92b4" event={"ID":"91b2d0f6-ea36-4860-aa8a-2645a1a44741","Type":"ContainerStarted","Data":"6087b55504d6ee9f2f682e54248495f7d35de75719066914b55365afac1ed42d"} Nov 24 13:22:34 crc kubenswrapper[5039]: I1124 13:22:34.364746 5039 generic.go:334] "Generic 
(PLEG): container finished" podID="4d398306-3543-4f30-90a9-1f39fd5c58e4" containerID="5cce04a09b2f9520dc740cfbd6abe92716ee6394ff8322389aeb73a316dc9fbc" exitCode=0 Nov 24 13:22:34 crc kubenswrapper[5039]: I1124 13:22:34.364776 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kf4pl" event={"ID":"4d398306-3543-4f30-90a9-1f39fd5c58e4","Type":"ContainerDied","Data":"5cce04a09b2f9520dc740cfbd6abe92716ee6394ff8322389aeb73a316dc9fbc"} Nov 24 13:22:34 crc kubenswrapper[5039]: I1124 13:22:34.397658 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-c92b4" podStartSLOduration=1.890710224 podStartE2EDuration="4.397638417s" podCreationTimestamp="2025-11-24 13:22:30 +0000 UTC" firstStartedPulling="2025-11-24 13:22:31.318939993 +0000 UTC m=+263.758064493" lastFinishedPulling="2025-11-24 13:22:33.825868186 +0000 UTC m=+266.264992686" observedRunningTime="2025-11-24 13:22:34.392467361 +0000 UTC m=+266.831591861" watchObservedRunningTime="2025-11-24 13:22:34.397638417 +0000 UTC m=+266.836762907" Nov 24 13:22:35 crc kubenswrapper[5039]: I1124 13:22:35.374288 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kf4pl" event={"ID":"4d398306-3543-4f30-90a9-1f39fd5c58e4","Type":"ContainerStarted","Data":"0d0b895a076de61f0fb370c78da0712ebe2a20b5b0e7b3ef0056d3d9d07e2b74"} Nov 24 13:22:35 crc kubenswrapper[5039]: I1124 13:22:35.376088 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qr7w8" event={"ID":"f3688350-42c7-4e8e-886d-e4d3c718221f","Type":"ContainerStarted","Data":"e95802098afe31499de8bd68d93106457377b3dcef6e1aa39012b850812bda26"} Nov 24 13:22:35 crc kubenswrapper[5039]: I1124 13:22:35.377850 5039 generic.go:334] "Generic (PLEG): container finished" podID="4b19d8ea-3c77-43f4-b236-9d5ee2edbafd" containerID="e201757b3b9da69b7228bec117d5b894806859b43835cab08b39aab75df736f5" exitCode=0 Nov 24 13:22:35 crc kubenswrapper[5039]: I1124 13:22:35.377879 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nqqfb" event={"ID":"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd","Type":"ContainerDied","Data":"e201757b3b9da69b7228bec117d5b894806859b43835cab08b39aab75df736f5"} Nov 24 13:22:35 crc kubenswrapper[5039]: I1124 13:22:35.395824 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-kf4pl" podStartSLOduration=2.994212252 podStartE2EDuration="5.395808271s" podCreationTimestamp="2025-11-24 13:22:30 +0000 UTC" firstStartedPulling="2025-11-24 13:22:32.323139226 +0000 UTC m=+264.762263726" lastFinishedPulling="2025-11-24 13:22:34.724735245 +0000 UTC m=+267.163859745" observedRunningTime="2025-11-24 13:22:35.393563362 +0000 UTC m=+267.832687892" watchObservedRunningTime="2025-11-24 13:22:35.395808271 +0000 UTC m=+267.834932771" Nov 24 13:22:36 crc kubenswrapper[5039]: I1124 13:22:36.399929 5039 generic.go:334] "Generic (PLEG): container finished" podID="f3688350-42c7-4e8e-886d-e4d3c718221f" containerID="e95802098afe31499de8bd68d93106457377b3dcef6e1aa39012b850812bda26" exitCode=0 Nov 24 13:22:36 crc kubenswrapper[5039]: I1124 13:22:36.399998 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qr7w8" event={"ID":"f3688350-42c7-4e8e-886d-e4d3c718221f","Type":"ContainerDied","Data":"e95802098afe31499de8bd68d93106457377b3dcef6e1aa39012b850812bda26"} Nov 24 13:22:36 
crc kubenswrapper[5039]: I1124 13:22:36.413480 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nqqfb" event={"ID":"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd","Type":"ContainerStarted","Data":"91c98b76ca53b44d70a15558f35ad1fdc1202198d17aa30c890ebbf5f24df483"} Nov 24 13:22:36 crc kubenswrapper[5039]: I1124 13:22:36.436166 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-nqqfb" podStartSLOduration=2.899119043 podStartE2EDuration="4.436145338s" podCreationTimestamp="2025-11-24 13:22:32 +0000 UTC" firstStartedPulling="2025-11-24 13:22:34.361843912 +0000 UTC m=+266.800968412" lastFinishedPulling="2025-11-24 13:22:35.898870207 +0000 UTC m=+268.337994707" observedRunningTime="2025-11-24 13:22:36.433828967 +0000 UTC m=+268.872953467" watchObservedRunningTime="2025-11-24 13:22:36.436145338 +0000 UTC m=+268.875269848" Nov 24 13:22:38 crc kubenswrapper[5039]: I1124 13:22:38.435547 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qr7w8" event={"ID":"f3688350-42c7-4e8e-886d-e4d3c718221f","Type":"ContainerStarted","Data":"39fb1e4afc6a9a97324b83d503ef1f71cef1adc38d1446d129690b204ac32b45"} Nov 24 13:22:38 crc kubenswrapper[5039]: I1124 13:22:38.461138 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qr7w8" podStartSLOduration=2.681545768 podStartE2EDuration="5.461118305s" podCreationTimestamp="2025-11-24 13:22:33 +0000 UTC" firstStartedPulling="2025-11-24 13:22:34.357883928 +0000 UTC m=+266.797008438" lastFinishedPulling="2025-11-24 13:22:37.137456475 +0000 UTC m=+269.576580975" observedRunningTime="2025-11-24 13:22:38.458480786 +0000 UTC m=+270.897605286" watchObservedRunningTime="2025-11-24 13:22:38.461118305 +0000 UTC m=+270.900242825" Nov 24 13:22:40 crc kubenswrapper[5039]: I1124 13:22:40.866037 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-c92b4" Nov 24 13:22:40 crc kubenswrapper[5039]: I1124 13:22:40.866133 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-c92b4" Nov 24 13:22:40 crc kubenswrapper[5039]: I1124 13:22:40.918087 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-c92b4" Nov 24 13:22:41 crc kubenswrapper[5039]: I1124 13:22:41.075261 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-kf4pl" Nov 24 13:22:41 crc kubenswrapper[5039]: I1124 13:22:41.075309 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-kf4pl" Nov 24 13:22:41 crc kubenswrapper[5039]: I1124 13:22:41.122452 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-kf4pl" Nov 24 13:22:41 crc kubenswrapper[5039]: I1124 13:22:41.497812 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-c92b4" Nov 24 13:22:41 crc kubenswrapper[5039]: I1124 13:22:41.498174 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-kf4pl" Nov 24 13:22:43 crc kubenswrapper[5039]: I1124 13:22:43.268999 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/certified-operators-nqqfb" Nov 24 13:22:43 crc kubenswrapper[5039]: I1124 13:22:43.269278 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-nqqfb" Nov 24 13:22:43 crc kubenswrapper[5039]: I1124 13:22:43.325473 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-nqqfb" Nov 24 13:22:43 crc kubenswrapper[5039]: I1124 13:22:43.492394 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-nqqfb" Nov 24 13:22:43 crc kubenswrapper[5039]: I1124 13:22:43.502565 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qr7w8" Nov 24 13:22:43 crc kubenswrapper[5039]: I1124 13:22:43.502806 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qr7w8" Nov 24 13:22:43 crc kubenswrapper[5039]: I1124 13:22:43.557213 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qr7w8" Nov 24 13:22:44 crc kubenswrapper[5039]: I1124 13:22:44.494512 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qr7w8" Nov 24 13:23:50 crc kubenswrapper[5039]: I1124 13:23:50.102037 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:23:50 crc kubenswrapper[5039]: I1124 13:23:50.102797 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:24:20 crc kubenswrapper[5039]: I1124 13:24:20.101698 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:24:20 crc kubenswrapper[5039]: I1124 13:24:20.102426 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:24:50 crc kubenswrapper[5039]: I1124 13:24:50.102104 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:24:50 crc kubenswrapper[5039]: I1124 13:24:50.104491 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:24:50 crc kubenswrapper[5039]: I1124 13:24:50.104769 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:24:50 crc kubenswrapper[5039]: I1124 13:24:50.105876 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ded1533ed2e79a9e9a4e41890abd61e8a4fb5cb75217c4bf2484efe880ce1e04"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 13:24:50 crc kubenswrapper[5039]: I1124 13:24:50.106210 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://ded1533ed2e79a9e9a4e41890abd61e8a4fb5cb75217c4bf2484efe880ce1e04" gracePeriod=600 Nov 24 13:24:50 crc kubenswrapper[5039]: I1124 13:24:50.561278 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="ded1533ed2e79a9e9a4e41890abd61e8a4fb5cb75217c4bf2484efe880ce1e04" exitCode=0 Nov 24 13:24:50 crc kubenswrapper[5039]: I1124 13:24:50.561397 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"ded1533ed2e79a9e9a4e41890abd61e8a4fb5cb75217c4bf2484efe880ce1e04"} Nov 24 13:24:50 crc kubenswrapper[5039]: I1124 13:24:50.561816 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"d1dbbf4f80166d075183d0109aa48d3369a50b433b63d157f67b87eea163a9c1"} Nov 24 13:24:50 crc kubenswrapper[5039]: I1124 13:24:50.561854 5039 scope.go:117] "RemoveContainer" containerID="f8adc2fd5e46f78365bd869341d8153097f7143bbea3b2d9c45bb1e0f347b488" Nov 24 13:25:44 crc kubenswrapper[5039]: I1124 13:25:44.860278 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-25rjg"] Nov 24 13:25:44 crc kubenswrapper[5039]: I1124 13:25:44.861695 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:44 crc kubenswrapper[5039]: I1124 13:25:44.874663 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-25rjg"] Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.012813 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cfa60652-42f6-4f7f-9e0e-006d1e954a23-ca-trust-extracted\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.012875 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45xjh\" (UniqueName: \"kubernetes.io/projected/cfa60652-42f6-4f7f-9e0e-006d1e954a23-kube-api-access-45xjh\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.012898 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cfa60652-42f6-4f7f-9e0e-006d1e954a23-installation-pull-secrets\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.013071 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cfa60652-42f6-4f7f-9e0e-006d1e954a23-registry-tls\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.013150 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cfa60652-42f6-4f7f-9e0e-006d1e954a23-registry-certificates\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.013188 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cfa60652-42f6-4f7f-9e0e-006d1e954a23-trusted-ca\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.013259 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.013285 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/cfa60652-42f6-4f7f-9e0e-006d1e954a23-bound-sa-token\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.031211 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.114411 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cfa60652-42f6-4f7f-9e0e-006d1e954a23-installation-pull-secrets\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.114580 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cfa60652-42f6-4f7f-9e0e-006d1e954a23-registry-tls\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.114636 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cfa60652-42f6-4f7f-9e0e-006d1e954a23-registry-certificates\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.114668 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cfa60652-42f6-4f7f-9e0e-006d1e954a23-trusted-ca\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.114716 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cfa60652-42f6-4f7f-9e0e-006d1e954a23-bound-sa-token\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.114787 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cfa60652-42f6-4f7f-9e0e-006d1e954a23-ca-trust-extracted\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.114828 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45xjh\" (UniqueName: \"kubernetes.io/projected/cfa60652-42f6-4f7f-9e0e-006d1e954a23-kube-api-access-45xjh\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.115843 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cfa60652-42f6-4f7f-9e0e-006d1e954a23-ca-trust-extracted\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.116129 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cfa60652-42f6-4f7f-9e0e-006d1e954a23-trusted-ca\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.117444 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cfa60652-42f6-4f7f-9e0e-006d1e954a23-registry-certificates\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.122463 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cfa60652-42f6-4f7f-9e0e-006d1e954a23-installation-pull-secrets\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.122951 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cfa60652-42f6-4f7f-9e0e-006d1e954a23-registry-tls\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.130861 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cfa60652-42f6-4f7f-9e0e-006d1e954a23-bound-sa-token\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.140572 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45xjh\" (UniqueName: \"kubernetes.io/projected/cfa60652-42f6-4f7f-9e0e-006d1e954a23-kube-api-access-45xjh\") pod \"image-registry-66df7c8f76-25rjg\" (UID: \"cfa60652-42f6-4f7f-9e0e-006d1e954a23\") " pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.177563 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.367006 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-25rjg"] Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.908187 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" event={"ID":"cfa60652-42f6-4f7f-9e0e-006d1e954a23","Type":"ContainerStarted","Data":"3a0697c7772ca6d02bb90b8c619260b73ee2736f968f7d8871b00b683cb03344"} Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.908268 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" event={"ID":"cfa60652-42f6-4f7f-9e0e-006d1e954a23","Type":"ContainerStarted","Data":"df16586490162365a75480784acded59d628bd8cffd9fd1c8d0f6989cd92edfe"} Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.908405 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:25:45 crc kubenswrapper[5039]: I1124 13:25:45.930288 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" podStartSLOduration=1.930259637 podStartE2EDuration="1.930259637s" podCreationTimestamp="2025-11-24 13:25:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:25:45.92480803 +0000 UTC m=+458.363932620" watchObservedRunningTime="2025-11-24 13:25:45.930259637 +0000 UTC m=+458.369384177" Nov 24 13:26:05 crc kubenswrapper[5039]: I1124 13:26:05.185776 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-25rjg" Nov 24 13:26:05 crc kubenswrapper[5039]: I1124 13:26:05.264958 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2swfx"] Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.308670 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" podUID="750f36ae-2e78-4a6d-8e78-e315d507d436" containerName="registry" containerID="cri-o://50f2be6ae811de5262a20e8dc6d240c2d70084bc8cd64e3ac2238a4e04728f44" gracePeriod=30 Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.654592 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.680744 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/750f36ae-2e78-4a6d-8e78-e315d507d436-trusted-ca\") pod \"750f36ae-2e78-4a6d-8e78-e315d507d436\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.680823 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/750f36ae-2e78-4a6d-8e78-e315d507d436-installation-pull-secrets\") pod \"750f36ae-2e78-4a6d-8e78-e315d507d436\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.680865 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/750f36ae-2e78-4a6d-8e78-e315d507d436-registry-tls\") pod \"750f36ae-2e78-4a6d-8e78-e315d507d436\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.680898 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/750f36ae-2e78-4a6d-8e78-e315d507d436-ca-trust-extracted\") pod \"750f36ae-2e78-4a6d-8e78-e315d507d436\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.680931 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/750f36ae-2e78-4a6d-8e78-e315d507d436-registry-certificates\") pod \"750f36ae-2e78-4a6d-8e78-e315d507d436\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.681259 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"750f36ae-2e78-4a6d-8e78-e315d507d436\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.681304 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/750f36ae-2e78-4a6d-8e78-e315d507d436-bound-sa-token\") pod \"750f36ae-2e78-4a6d-8e78-e315d507d436\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.681355 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x6hg5\" (UniqueName: \"kubernetes.io/projected/750f36ae-2e78-4a6d-8e78-e315d507d436-kube-api-access-x6hg5\") pod \"750f36ae-2e78-4a6d-8e78-e315d507d436\" (UID: \"750f36ae-2e78-4a6d-8e78-e315d507d436\") " Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.682433 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/750f36ae-2e78-4a6d-8e78-e315d507d436-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "750f36ae-2e78-4a6d-8e78-e315d507d436" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.683485 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/750f36ae-2e78-4a6d-8e78-e315d507d436-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "750f36ae-2e78-4a6d-8e78-e315d507d436" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.684470 5039 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/750f36ae-2e78-4a6d-8e78-e315d507d436-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.684592 5039 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/750f36ae-2e78-4a6d-8e78-e315d507d436-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.697088 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/750f36ae-2e78-4a6d-8e78-e315d507d436-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "750f36ae-2e78-4a6d-8e78-e315d507d436" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.697110 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/750f36ae-2e78-4a6d-8e78-e315d507d436-kube-api-access-x6hg5" (OuterVolumeSpecName: "kube-api-access-x6hg5") pod "750f36ae-2e78-4a6d-8e78-e315d507d436" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436"). InnerVolumeSpecName "kube-api-access-x6hg5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.697997 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/750f36ae-2e78-4a6d-8e78-e315d507d436-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "750f36ae-2e78-4a6d-8e78-e315d507d436" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.699039 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/750f36ae-2e78-4a6d-8e78-e315d507d436-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "750f36ae-2e78-4a6d-8e78-e315d507d436" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.699241 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "750f36ae-2e78-4a6d-8e78-e315d507d436" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.712354 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/750f36ae-2e78-4a6d-8e78-e315d507d436-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "750f36ae-2e78-4a6d-8e78-e315d507d436" (UID: "750f36ae-2e78-4a6d-8e78-e315d507d436"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.785680 5039 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/750f36ae-2e78-4a6d-8e78-e315d507d436-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.785738 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x6hg5\" (UniqueName: \"kubernetes.io/projected/750f36ae-2e78-4a6d-8e78-e315d507d436-kube-api-access-x6hg5\") on node \"crc\" DevicePath \"\"" Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.785764 5039 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/750f36ae-2e78-4a6d-8e78-e315d507d436-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.785782 5039 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/750f36ae-2e78-4a6d-8e78-e315d507d436-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 24 13:26:30 crc kubenswrapper[5039]: I1124 13:26:30.785800 5039 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/750f36ae-2e78-4a6d-8e78-e315d507d436-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 24 13:26:31 crc kubenswrapper[5039]: I1124 13:26:31.196047 5039 generic.go:334] "Generic (PLEG): container finished" podID="750f36ae-2e78-4a6d-8e78-e315d507d436" containerID="50f2be6ae811de5262a20e8dc6d240c2d70084bc8cd64e3ac2238a4e04728f44" exitCode=0 Nov 24 13:26:31 crc kubenswrapper[5039]: I1124 13:26:31.196112 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" Nov 24 13:26:31 crc kubenswrapper[5039]: I1124 13:26:31.196113 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" event={"ID":"750f36ae-2e78-4a6d-8e78-e315d507d436","Type":"ContainerDied","Data":"50f2be6ae811de5262a20e8dc6d240c2d70084bc8cd64e3ac2238a4e04728f44"} Nov 24 13:26:31 crc kubenswrapper[5039]: I1124 13:26:31.196267 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-2swfx" event={"ID":"750f36ae-2e78-4a6d-8e78-e315d507d436","Type":"ContainerDied","Data":"ecb8280c7619e91ed039ceaa623b2113a26220346cf2e2777c9212bf8c2549df"} Nov 24 13:26:31 crc kubenswrapper[5039]: I1124 13:26:31.196313 5039 scope.go:117] "RemoveContainer" containerID="50f2be6ae811de5262a20e8dc6d240c2d70084bc8cd64e3ac2238a4e04728f44" Nov 24 13:26:31 crc kubenswrapper[5039]: I1124 13:26:31.222928 5039 scope.go:117] "RemoveContainer" containerID="50f2be6ae811de5262a20e8dc6d240c2d70084bc8cd64e3ac2238a4e04728f44" Nov 24 13:26:31 crc kubenswrapper[5039]: E1124 13:26:31.223463 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"50f2be6ae811de5262a20e8dc6d240c2d70084bc8cd64e3ac2238a4e04728f44\": container with ID starting with 50f2be6ae811de5262a20e8dc6d240c2d70084bc8cd64e3ac2238a4e04728f44 not found: ID does not exist" containerID="50f2be6ae811de5262a20e8dc6d240c2d70084bc8cd64e3ac2238a4e04728f44" Nov 24 13:26:31 crc kubenswrapper[5039]: I1124 13:26:31.223531 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50f2be6ae811de5262a20e8dc6d240c2d70084bc8cd64e3ac2238a4e04728f44"} err="failed to get container status \"50f2be6ae811de5262a20e8dc6d240c2d70084bc8cd64e3ac2238a4e04728f44\": rpc error: code = NotFound desc = could not find container \"50f2be6ae811de5262a20e8dc6d240c2d70084bc8cd64e3ac2238a4e04728f44\": container with ID starting with 50f2be6ae811de5262a20e8dc6d240c2d70084bc8cd64e3ac2238a4e04728f44 not found: ID does not exist" Nov 24 13:26:31 crc kubenswrapper[5039]: I1124 13:26:31.236572 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2swfx"] Nov 24 13:26:31 crc kubenswrapper[5039]: I1124 13:26:31.240319 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2swfx"] Nov 24 13:26:32 crc kubenswrapper[5039]: I1124 13:26:32.314968 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="750f36ae-2e78-4a6d-8e78-e315d507d436" path="/var/lib/kubelet/pods/750f36ae-2e78-4a6d-8e78-e315d507d436/volumes" Nov 24 13:26:50 crc kubenswrapper[5039]: I1124 13:26:50.101968 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:26:50 crc kubenswrapper[5039]: I1124 13:26:50.102668 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:27:20 crc 
kubenswrapper[5039]: I1124 13:27:20.101480 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:27:20 crc kubenswrapper[5039]: I1124 13:27:20.101963 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:27:50 crc kubenswrapper[5039]: I1124 13:27:50.101218 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:27:50 crc kubenswrapper[5039]: I1124 13:27:50.101831 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:27:50 crc kubenswrapper[5039]: I1124 13:27:50.101895 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:27:50 crc kubenswrapper[5039]: I1124 13:27:50.102642 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d1dbbf4f80166d075183d0109aa48d3369a50b433b63d157f67b87eea163a9c1"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 13:27:50 crc kubenswrapper[5039]: I1124 13:27:50.102734 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://d1dbbf4f80166d075183d0109aa48d3369a50b433b63d157f67b87eea163a9c1" gracePeriod=600 Nov 24 13:27:50 crc kubenswrapper[5039]: I1124 13:27:50.690151 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="d1dbbf4f80166d075183d0109aa48d3369a50b433b63d157f67b87eea163a9c1" exitCode=0 Nov 24 13:27:50 crc kubenswrapper[5039]: I1124 13:27:50.690257 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"d1dbbf4f80166d075183d0109aa48d3369a50b433b63d157f67b87eea163a9c1"} Nov 24 13:27:50 crc kubenswrapper[5039]: I1124 13:27:50.690524 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"ad5ffc63035c78c438991177870b3e0e28e428524aad180cebafc49a63fbdb72"} Nov 24 13:27:50 crc kubenswrapper[5039]: I1124 13:27:50.690548 5039 scope.go:117] "RemoveContainer" 
containerID="ded1533ed2e79a9e9a4e41890abd61e8a4fb5cb75217c4bf2484efe880ce1e04" Nov 24 13:28:07 crc kubenswrapper[5039]: I1124 13:28:07.026415 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b"] Nov 24 13:28:07 crc kubenswrapper[5039]: E1124 13:28:07.027282 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="750f36ae-2e78-4a6d-8e78-e315d507d436" containerName="registry" Nov 24 13:28:07 crc kubenswrapper[5039]: I1124 13:28:07.027299 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="750f36ae-2e78-4a6d-8e78-e315d507d436" containerName="registry" Nov 24 13:28:07 crc kubenswrapper[5039]: I1124 13:28:07.027449 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="750f36ae-2e78-4a6d-8e78-e315d507d436" containerName="registry" Nov 24 13:28:07 crc kubenswrapper[5039]: I1124 13:28:07.028541 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b" Nov 24 13:28:07 crc kubenswrapper[5039]: I1124 13:28:07.032148 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 24 13:28:07 crc kubenswrapper[5039]: I1124 13:28:07.043523 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b"] Nov 24 13:28:07 crc kubenswrapper[5039]: I1124 13:28:07.179377 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f60bd1ab-ddc1-462f-85f9-e47d7305727d-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b\" (UID: \"f60bd1ab-ddc1-462f-85f9-e47d7305727d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b" Nov 24 13:28:07 crc kubenswrapper[5039]: I1124 13:28:07.179486 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f60bd1ab-ddc1-462f-85f9-e47d7305727d-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b\" (UID: \"f60bd1ab-ddc1-462f-85f9-e47d7305727d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b" Nov 24 13:28:07 crc kubenswrapper[5039]: I1124 13:28:07.179587 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txff6\" (UniqueName: \"kubernetes.io/projected/f60bd1ab-ddc1-462f-85f9-e47d7305727d-kube-api-access-txff6\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b\" (UID: \"f60bd1ab-ddc1-462f-85f9-e47d7305727d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b" Nov 24 13:28:07 crc kubenswrapper[5039]: I1124 13:28:07.280634 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f60bd1ab-ddc1-462f-85f9-e47d7305727d-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b\" (UID: \"f60bd1ab-ddc1-462f-85f9-e47d7305727d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b" Nov 24 13:28:07 crc kubenswrapper[5039]: I1124 13:28:07.281036 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" 
(UniqueName: \"kubernetes.io/empty-dir/f60bd1ab-ddc1-462f-85f9-e47d7305727d-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b\" (UID: \"f60bd1ab-ddc1-462f-85f9-e47d7305727d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b" Nov 24 13:28:07 crc kubenswrapper[5039]: I1124 13:28:07.281103 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txff6\" (UniqueName: \"kubernetes.io/projected/f60bd1ab-ddc1-462f-85f9-e47d7305727d-kube-api-access-txff6\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b\" (UID: \"f60bd1ab-ddc1-462f-85f9-e47d7305727d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b" Nov 24 13:28:07 crc kubenswrapper[5039]: I1124 13:28:07.281999 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f60bd1ab-ddc1-462f-85f9-e47d7305727d-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b\" (UID: \"f60bd1ab-ddc1-462f-85f9-e47d7305727d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b" Nov 24 13:28:07 crc kubenswrapper[5039]: I1124 13:28:07.282186 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f60bd1ab-ddc1-462f-85f9-e47d7305727d-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b\" (UID: \"f60bd1ab-ddc1-462f-85f9-e47d7305727d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b" Nov 24 13:28:07 crc kubenswrapper[5039]: I1124 13:28:07.299699 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txff6\" (UniqueName: \"kubernetes.io/projected/f60bd1ab-ddc1-462f-85f9-e47d7305727d-kube-api-access-txff6\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b\" (UID: \"f60bd1ab-ddc1-462f-85f9-e47d7305727d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b" Nov 24 13:28:07 crc kubenswrapper[5039]: I1124 13:28:07.349473 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b" Nov 24 13:28:07 crc kubenswrapper[5039]: I1124 13:28:07.802780 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b"] Nov 24 13:28:07 crc kubenswrapper[5039]: W1124 13:28:07.829478 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf60bd1ab_ddc1_462f_85f9_e47d7305727d.slice/crio-198d90ede9b0f2ca4067f49f67e5c16ea010433541c1d1cbb40330de2c4a1d5a WatchSource:0}: Error finding container 198d90ede9b0f2ca4067f49f67e5c16ea010433541c1d1cbb40330de2c4a1d5a: Status 404 returned error can't find the container with id 198d90ede9b0f2ca4067f49f67e5c16ea010433541c1d1cbb40330de2c4a1d5a Nov 24 13:28:08 crc kubenswrapper[5039]: I1124 13:28:08.807091 5039 generic.go:334] "Generic (PLEG): container finished" podID="f60bd1ab-ddc1-462f-85f9-e47d7305727d" containerID="48f3d375d73737b4f7b7da5d663cb06af34d5569a82a0b855a97dc97476e9731" exitCode=0 Nov 24 13:28:08 crc kubenswrapper[5039]: I1124 13:28:08.807149 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b" event={"ID":"f60bd1ab-ddc1-462f-85f9-e47d7305727d","Type":"ContainerDied","Data":"48f3d375d73737b4f7b7da5d663cb06af34d5569a82a0b855a97dc97476e9731"} Nov 24 13:28:08 crc kubenswrapper[5039]: I1124 13:28:08.807472 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b" event={"ID":"f60bd1ab-ddc1-462f-85f9-e47d7305727d","Type":"ContainerStarted","Data":"198d90ede9b0f2ca4067f49f67e5c16ea010433541c1d1cbb40330de2c4a1d5a"} Nov 24 13:28:08 crc kubenswrapper[5039]: I1124 13:28:08.811329 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 13:28:09 crc kubenswrapper[5039]: I1124 13:28:09.817011 5039 generic.go:334] "Generic (PLEG): container finished" podID="f60bd1ab-ddc1-462f-85f9-e47d7305727d" containerID="d0ed4ec6a4a75aba4c637e28fa3b3e1130bcf70101ce285741cc3f5da7227c20" exitCode=0 Nov 24 13:28:09 crc kubenswrapper[5039]: I1124 13:28:09.817086 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b" event={"ID":"f60bd1ab-ddc1-462f-85f9-e47d7305727d","Type":"ContainerDied","Data":"d0ed4ec6a4a75aba4c637e28fa3b3e1130bcf70101ce285741cc3f5da7227c20"} Nov 24 13:28:10 crc kubenswrapper[5039]: I1124 13:28:10.828878 5039 generic.go:334] "Generic (PLEG): container finished" podID="f60bd1ab-ddc1-462f-85f9-e47d7305727d" containerID="047ba33b79997f9f3c237bc56b9af94dd7ec7a4b6425e9cc26fd1a3036f93ebc" exitCode=0 Nov 24 13:28:10 crc kubenswrapper[5039]: I1124 13:28:10.828961 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b" event={"ID":"f60bd1ab-ddc1-462f-85f9-e47d7305727d","Type":"ContainerDied","Data":"047ba33b79997f9f3c237bc56b9af94dd7ec7a4b6425e9cc26fd1a3036f93ebc"} Nov 24 13:28:12 crc kubenswrapper[5039]: I1124 13:28:12.070299 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b" Nov 24 13:28:12 crc kubenswrapper[5039]: I1124 13:28:12.251705 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-txff6\" (UniqueName: \"kubernetes.io/projected/f60bd1ab-ddc1-462f-85f9-e47d7305727d-kube-api-access-txff6\") pod \"f60bd1ab-ddc1-462f-85f9-e47d7305727d\" (UID: \"f60bd1ab-ddc1-462f-85f9-e47d7305727d\") " Nov 24 13:28:12 crc kubenswrapper[5039]: I1124 13:28:12.251778 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f60bd1ab-ddc1-462f-85f9-e47d7305727d-bundle\") pod \"f60bd1ab-ddc1-462f-85f9-e47d7305727d\" (UID: \"f60bd1ab-ddc1-462f-85f9-e47d7305727d\") " Nov 24 13:28:12 crc kubenswrapper[5039]: I1124 13:28:12.251846 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f60bd1ab-ddc1-462f-85f9-e47d7305727d-util\") pod \"f60bd1ab-ddc1-462f-85f9-e47d7305727d\" (UID: \"f60bd1ab-ddc1-462f-85f9-e47d7305727d\") " Nov 24 13:28:12 crc kubenswrapper[5039]: I1124 13:28:12.255404 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f60bd1ab-ddc1-462f-85f9-e47d7305727d-bundle" (OuterVolumeSpecName: "bundle") pod "f60bd1ab-ddc1-462f-85f9-e47d7305727d" (UID: "f60bd1ab-ddc1-462f-85f9-e47d7305727d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:28:12 crc kubenswrapper[5039]: I1124 13:28:12.266330 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f60bd1ab-ddc1-462f-85f9-e47d7305727d-kube-api-access-txff6" (OuterVolumeSpecName: "kube-api-access-txff6") pod "f60bd1ab-ddc1-462f-85f9-e47d7305727d" (UID: "f60bd1ab-ddc1-462f-85f9-e47d7305727d"). InnerVolumeSpecName "kube-api-access-txff6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:28:12 crc kubenswrapper[5039]: I1124 13:28:12.284981 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f60bd1ab-ddc1-462f-85f9-e47d7305727d-util" (OuterVolumeSpecName: "util") pod "f60bd1ab-ddc1-462f-85f9-e47d7305727d" (UID: "f60bd1ab-ddc1-462f-85f9-e47d7305727d"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:28:12 crc kubenswrapper[5039]: I1124 13:28:12.354168 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-txff6\" (UniqueName: \"kubernetes.io/projected/f60bd1ab-ddc1-462f-85f9-e47d7305727d-kube-api-access-txff6\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:12 crc kubenswrapper[5039]: I1124 13:28:12.354226 5039 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f60bd1ab-ddc1-462f-85f9-e47d7305727d-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:12 crc kubenswrapper[5039]: I1124 13:28:12.354246 5039 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f60bd1ab-ddc1-462f-85f9-e47d7305727d-util\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:12 crc kubenswrapper[5039]: I1124 13:28:12.846128 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b" event={"ID":"f60bd1ab-ddc1-462f-85f9-e47d7305727d","Type":"ContainerDied","Data":"198d90ede9b0f2ca4067f49f67e5c16ea010433541c1d1cbb40330de2c4a1d5a"} Nov 24 13:28:12 crc kubenswrapper[5039]: I1124 13:28:12.846199 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="198d90ede9b0f2ca4067f49f67e5c16ea010433541c1d1cbb40330de2c4a1d5a" Nov 24 13:28:12 crc kubenswrapper[5039]: I1124 13:28:12.846297 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b" Nov 24 13:28:15 crc kubenswrapper[5039]: E1124 13:28:15.861897 5039 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf60bd1ab_ddc1_462f_85f9_e47d7305727d.slice/crio-conmon-48f3d375d73737b4f7b7da5d663cb06af34d5569a82a0b855a97dc97476e9731.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf60bd1ab_ddc1_462f_85f9_e47d7305727d.slice/crio-48f3d375d73737b4f7b7da5d663cb06af34d5569a82a0b855a97dc97476e9731.scope\": RecentStats: unable to find data in memory cache]" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.478277 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-w2ctb"] Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.478810 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="nbdb" containerID="cri-o://86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224" gracePeriod=30 Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.478914 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="northd" containerID="cri-o://95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90" gracePeriod=30 Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.478923 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="kube-rbac-proxy-ovn-metrics" 
containerID="cri-o://045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614" gracePeriod=30 Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.479050 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="kube-rbac-proxy-node" containerID="cri-o://7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a" gracePeriod=30 Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.479116 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovn-acl-logging" containerID="cri-o://2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad" gracePeriod=30 Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.479193 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="sbdb" containerID="cri-o://7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288" gracePeriod=30 Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.479237 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovn-controller" containerID="cri-o://d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb" gracePeriod=30 Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.519174 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovnkube-controller" containerID="cri-o://99c5b5e79f97b4d7884d6b8074b7800ee508ef96b0b846ef637fa476acee40bf" gracePeriod=30 Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.816168 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w2ctb_54c05b03-6747-47bf-a40d-8a9332c4d856/ovnkube-controller/3.log" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.818439 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w2ctb_54c05b03-6747-47bf-a40d-8a9332c4d856/ovn-acl-logging/0.log" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.819346 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w2ctb_54c05b03-6747-47bf-a40d-8a9332c4d856/ovn-controller/0.log" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.819777 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.882578 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w2ctb_54c05b03-6747-47bf-a40d-8a9332c4d856/ovnkube-controller/3.log" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.885387 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w2ctb_54c05b03-6747-47bf-a40d-8a9332c4d856/ovn-acl-logging/0.log" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.885980 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w2ctb_54c05b03-6747-47bf-a40d-8a9332c4d856/ovn-controller/0.log" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886376 5039 generic.go:334] "Generic (PLEG): container finished" podID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerID="99c5b5e79f97b4d7884d6b8074b7800ee508ef96b0b846ef637fa476acee40bf" exitCode=0 Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886405 5039 generic.go:334] "Generic (PLEG): container finished" podID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerID="7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288" exitCode=0 Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886413 5039 generic.go:334] "Generic (PLEG): container finished" podID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerID="86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224" exitCode=0 Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886424 5039 generic.go:334] "Generic (PLEG): container finished" podID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerID="95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90" exitCode=0 Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886433 5039 generic.go:334] "Generic (PLEG): container finished" podID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerID="045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614" exitCode=0 Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886442 5039 generic.go:334] "Generic (PLEG): container finished" podID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerID="7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a" exitCode=0 Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886450 5039 generic.go:334] "Generic (PLEG): container finished" podID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerID="2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad" exitCode=143 Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886459 5039 generic.go:334] "Generic (PLEG): container finished" podID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerID="d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb" exitCode=143 Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886473 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886564 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerDied","Data":"99c5b5e79f97b4d7884d6b8074b7800ee508ef96b0b846ef637fa476acee40bf"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886627 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerDied","Data":"7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886650 5039 scope.go:117] "RemoveContainer" containerID="99c5b5e79f97b4d7884d6b8074b7800ee508ef96b0b846ef637fa476acee40bf" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886655 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerDied","Data":"86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886675 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerDied","Data":"95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886697 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerDied","Data":"045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886716 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerDied","Data":"7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886733 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886753 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886764 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886774 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886784 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886808 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886818 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886828 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886838 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886852 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerDied","Data":"2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886868 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"99c5b5e79f97b4d7884d6b8074b7800ee508ef96b0b846ef637fa476acee40bf"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886880 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886890 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886900 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886910 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886921 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886930 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886940 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886949 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886959 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886973 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerDied","Data":"d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.886991 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"99c5b5e79f97b4d7884d6b8074b7800ee508ef96b0b846ef637fa476acee40bf"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.887001 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.887011 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.887020 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.887030 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.887040 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.887049 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.887058 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.887068 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.887078 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.887092 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2ctb" event={"ID":"54c05b03-6747-47bf-a40d-8a9332c4d856","Type":"ContainerDied","Data":"d3e18f52aa9a4413c8245a5f84d29d124d1211d07b991d1f411169fab8ac98c9"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.887109 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"99c5b5e79f97b4d7884d6b8074b7800ee508ef96b0b846ef637fa476acee40bf"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.887120 5039 
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.887130 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.887140 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.887149 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.887159 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.887168 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.887177 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.887188 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.887198 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.888879 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kr94g_6c18c830-d513-4df0-be92-cd44f2d2c5df/kube-multus/2.log" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.891557 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kr94g_6c18c830-d513-4df0-be92-cd44f2d2c5df/kube-multus/1.log" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.891609 5039 generic.go:334] "Generic (PLEG): container finished" podID="6c18c830-d513-4df0-be92-cd44f2d2c5df" containerID="afbc25e2b688679dbfe2c40bce4636e6482ec1605d671d0aa8e10a779e2f545a" exitCode=2 Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.891644 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kr94g" event={"ID":"6c18c830-d513-4df0-be92-cd44f2d2c5df","Type":"ContainerDied","Data":"afbc25e2b688679dbfe2c40bce4636e6482ec1605d671d0aa8e10a779e2f545a"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.891673 5039 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8f68c347316af28eef4d9d661fff4ef8497e81704ecbdb6794e54ba842a37e20"} Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.892200 5039 scope.go:117] "RemoveContainer" 
containerID="afbc25e2b688679dbfe2c40bce4636e6482ec1605d671d0aa8e10a779e2f545a" Nov 24 13:28:18 crc kubenswrapper[5039]: E1124 13:28:18.892548 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-kr94g_openshift-multus(6c18c830-d513-4df0-be92-cd44f2d2c5df)\"" pod="openshift-multus/multus-kr94g" podUID="6c18c830-d513-4df0-be92-cd44f2d2c5df" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.908840 5039 scope.go:117] "RemoveContainer" containerID="217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.927233 5039 scope.go:117] "RemoveContainer" containerID="7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.944723 5039 scope.go:117] "RemoveContainer" containerID="86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.945461 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-cni-bin\") pod \"54c05b03-6747-47bf-a40d-8a9332c4d856\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.945514 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-var-lib-openvswitch\") pod \"54c05b03-6747-47bf-a40d-8a9332c4d856\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.945552 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "54c05b03-6747-47bf-a40d-8a9332c4d856" (UID: "54c05b03-6747-47bf-a40d-8a9332c4d856"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.945566 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/54c05b03-6747-47bf-a40d-8a9332c4d856-ovn-node-metrics-cert\") pod \"54c05b03-6747-47bf-a40d-8a9332c4d856\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.945589 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-run-openvswitch\") pod \"54c05b03-6747-47bf-a40d-8a9332c4d856\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.945595 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "54c05b03-6747-47bf-a40d-8a9332c4d856" (UID: "54c05b03-6747-47bf-a40d-8a9332c4d856"). InnerVolumeSpecName "var-lib-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.945616 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-run-netns\") pod \"54c05b03-6747-47bf-a40d-8a9332c4d856\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.945641 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-var-lib-cni-networks-ovn-kubernetes\") pod \"54c05b03-6747-47bf-a40d-8a9332c4d856\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.945671 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-cni-netd\") pod \"54c05b03-6747-47bf-a40d-8a9332c4d856\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.945693 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-run-ovn-kubernetes\") pod \"54c05b03-6747-47bf-a40d-8a9332c4d856\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.945704 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "54c05b03-6747-47bf-a40d-8a9332c4d856" (UID: "54c05b03-6747-47bf-a40d-8a9332c4d856"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.945720 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "54c05b03-6747-47bf-a40d-8a9332c4d856" (UID: "54c05b03-6747-47bf-a40d-8a9332c4d856"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.945728 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/54c05b03-6747-47bf-a40d-8a9332c4d856-env-overrides\") pod \"54c05b03-6747-47bf-a40d-8a9332c4d856\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.945787 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "54c05b03-6747-47bf-a40d-8a9332c4d856" (UID: "54c05b03-6747-47bf-a40d-8a9332c4d856"). InnerVolumeSpecName "host-cni-netd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.945797 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "54c05b03-6747-47bf-a40d-8a9332c4d856" (UID: "54c05b03-6747-47bf-a40d-8a9332c4d856"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.945820 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "54c05b03-6747-47bf-a40d-8a9332c4d856" (UID: "54c05b03-6747-47bf-a40d-8a9332c4d856"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.945873 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-run-ovn\") pod \"54c05b03-6747-47bf-a40d-8a9332c4d856\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.945909 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "54c05b03-6747-47bf-a40d-8a9332c4d856" (UID: "54c05b03-6747-47bf-a40d-8a9332c4d856"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946039 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-log-socket\") pod \"54c05b03-6747-47bf-a40d-8a9332c4d856\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946071 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fhfzg\" (UniqueName: \"kubernetes.io/projected/54c05b03-6747-47bf-a40d-8a9332c4d856-kube-api-access-fhfzg\") pod \"54c05b03-6747-47bf-a40d-8a9332c4d856\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946083 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54c05b03-6747-47bf-a40d-8a9332c4d856-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "54c05b03-6747-47bf-a40d-8a9332c4d856" (UID: "54c05b03-6747-47bf-a40d-8a9332c4d856"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946103 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/54c05b03-6747-47bf-a40d-8a9332c4d856-ovnkube-script-lib\") pod \"54c05b03-6747-47bf-a40d-8a9332c4d856\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946129 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-run-systemd\") pod \"54c05b03-6747-47bf-a40d-8a9332c4d856\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946134 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-log-socket" (OuterVolumeSpecName: "log-socket") pod "54c05b03-6747-47bf-a40d-8a9332c4d856" (UID: "54c05b03-6747-47bf-a40d-8a9332c4d856"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946150 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-etc-openvswitch\") pod \"54c05b03-6747-47bf-a40d-8a9332c4d856\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946175 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-kubelet\") pod \"54c05b03-6747-47bf-a40d-8a9332c4d856\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946194 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-systemd-units\") pod \"54c05b03-6747-47bf-a40d-8a9332c4d856\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946211 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-node-log\") pod \"54c05b03-6747-47bf-a40d-8a9332c4d856\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946234 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/54c05b03-6747-47bf-a40d-8a9332c4d856-ovnkube-config\") pod \"54c05b03-6747-47bf-a40d-8a9332c4d856\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946260 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-slash\") pod \"54c05b03-6747-47bf-a40d-8a9332c4d856\" (UID: \"54c05b03-6747-47bf-a40d-8a9332c4d856\") " Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946469 5039 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-var-lib-openvswitch\") on node 
\"crc\" DevicePath \"\"" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946487 5039 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946499 5039 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946526 5039 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946538 5039 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946548 5039 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946558 5039 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/54c05b03-6747-47bf-a40d-8a9332c4d856-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946567 5039 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946577 5039 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-log-socket\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946587 5039 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946256 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "54c05b03-6747-47bf-a40d-8a9332c4d856" (UID: "54c05b03-6747-47bf-a40d-8a9332c4d856"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946306 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-node-log" (OuterVolumeSpecName: "node-log") pod "54c05b03-6747-47bf-a40d-8a9332c4d856" (UID: "54c05b03-6747-47bf-a40d-8a9332c4d856"). InnerVolumeSpecName "node-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946646 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-slash" (OuterVolumeSpecName: "host-slash") pod "54c05b03-6747-47bf-a40d-8a9332c4d856" (UID: "54c05b03-6747-47bf-a40d-8a9332c4d856"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946324 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "54c05b03-6747-47bf-a40d-8a9332c4d856" (UID: "54c05b03-6747-47bf-a40d-8a9332c4d856"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946614 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54c05b03-6747-47bf-a40d-8a9332c4d856-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "54c05b03-6747-47bf-a40d-8a9332c4d856" (UID: "54c05b03-6747-47bf-a40d-8a9332c4d856"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946620 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54c05b03-6747-47bf-a40d-8a9332c4d856-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "54c05b03-6747-47bf-a40d-8a9332c4d856" (UID: "54c05b03-6747-47bf-a40d-8a9332c4d856"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.946814 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "54c05b03-6747-47bf-a40d-8a9332c4d856" (UID: "54c05b03-6747-47bf-a40d-8a9332c4d856"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.963882 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54c05b03-6747-47bf-a40d-8a9332c4d856-kube-api-access-fhfzg" (OuterVolumeSpecName: "kube-api-access-fhfzg") pod "54c05b03-6747-47bf-a40d-8a9332c4d856" (UID: "54c05b03-6747-47bf-a40d-8a9332c4d856"). InnerVolumeSpecName "kube-api-access-fhfzg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.965773 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54c05b03-6747-47bf-a40d-8a9332c4d856-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "54c05b03-6747-47bf-a40d-8a9332c4d856" (UID: "54c05b03-6747-47bf-a40d-8a9332c4d856"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.972018 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "54c05b03-6747-47bf-a40d-8a9332c4d856" (UID: "54c05b03-6747-47bf-a40d-8a9332c4d856"). 
InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:28:18 crc kubenswrapper[5039]: I1124 13:28:18.984675 5039 scope.go:117] "RemoveContainer" containerID="95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.015443 5039 scope.go:117] "RemoveContainer" containerID="045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.024927 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-8hz2p"] Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.025169 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="kube-rbac-proxy-ovn-metrics" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025186 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="kube-rbac-proxy-ovn-metrics" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.025199 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="nbdb" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025205 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="nbdb" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.025218 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovn-controller" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025225 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovn-controller" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.025235 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f60bd1ab-ddc1-462f-85f9-e47d7305727d" containerName="util" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025242 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="f60bd1ab-ddc1-462f-85f9-e47d7305727d" containerName="util" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.025254 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovnkube-controller" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025261 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovnkube-controller" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.025268 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovnkube-controller" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025276 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovnkube-controller" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.025284 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="kubecfg-setup" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025291 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="kubecfg-setup" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.025301 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f60bd1ab-ddc1-462f-85f9-e47d7305727d" 
containerName="pull" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025307 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="f60bd1ab-ddc1-462f-85f9-e47d7305727d" containerName="pull" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.025318 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f60bd1ab-ddc1-462f-85f9-e47d7305727d" containerName="extract" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025325 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="f60bd1ab-ddc1-462f-85f9-e47d7305727d" containerName="extract" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.025334 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="sbdb" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025340 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="sbdb" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.025350 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="kube-rbac-proxy-node" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025357 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="kube-rbac-proxy-node" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.025364 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="northd" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025370 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="northd" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.025384 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovn-acl-logging" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025392 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovn-acl-logging" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.025402 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovnkube-controller" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025410 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovnkube-controller" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025561 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="northd" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025571 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovnkube-controller" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025580 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovnkube-controller" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025590 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovn-controller" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025597 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" 
containerName="kube-rbac-proxy-ovn-metrics" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025612 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovn-acl-logging" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025623 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="kube-rbac-proxy-node" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025630 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="nbdb" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025640 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="f60bd1ab-ddc1-462f-85f9-e47d7305727d" containerName="extract" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025650 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovnkube-controller" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025658 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="sbdb" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025668 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovnkube-controller" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.025776 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovnkube-controller" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025785 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovnkube-controller" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.025902 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovnkube-controller" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.026009 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovnkube-controller" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.026020 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" containerName="ovnkube-controller" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.035373 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.039696 5039 scope.go:117] "RemoveContainer" containerID="7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.047647 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-kubelet\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.047826 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-systemd-units\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.047892 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.047921 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-run-ovn\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.047968 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-var-lib-openvswitch\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.047996 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48zbm\" (UniqueName: \"kubernetes.io/projected/e46fcc9c-d5aa-456f-b440-9deed992d7c6-kube-api-access-48zbm\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048060 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-log-socket\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048086 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-slash\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc 
kubenswrapper[5039]: I1124 13:28:19.048105 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e46fcc9c-d5aa-456f-b440-9deed992d7c6-ovnkube-config\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048124 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-etc-openvswitch\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048143 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-run-openvswitch\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048235 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-run-systemd\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048317 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e46fcc9c-d5aa-456f-b440-9deed992d7c6-env-overrides\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048364 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/e46fcc9c-d5aa-456f-b440-9deed992d7c6-ovnkube-script-lib\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048428 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-node-log\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048456 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-cni-netd\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048477 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-run-ovn-kubernetes\") pod \"ovnkube-node-8hz2p\" (UID: 
\"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048558 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-cni-bin\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048582 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-run-netns\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048606 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e46fcc9c-d5aa-456f-b440-9deed992d7c6-ovn-node-metrics-cert\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048685 5039 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/54c05b03-6747-47bf-a40d-8a9332c4d856-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048705 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fhfzg\" (UniqueName: \"kubernetes.io/projected/54c05b03-6747-47bf-a40d-8a9332c4d856-kube-api-access-fhfzg\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048716 5039 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/54c05b03-6747-47bf-a40d-8a9332c4d856-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048727 5039 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048738 5039 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048748 5039 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048760 5039 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-node-log\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048770 5039 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048780 5039 
reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/54c05b03-6747-47bf-a40d-8a9332c4d856-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.048791 5039 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/54c05b03-6747-47bf-a40d-8a9332c4d856-host-slash\") on node \"crc\" DevicePath \"\"" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.054437 5039 scope.go:117] "RemoveContainer" containerID="2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.078226 5039 scope.go:117] "RemoveContainer" containerID="d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.096192 5039 scope.go:117] "RemoveContainer" containerID="23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.110533 5039 scope.go:117] "RemoveContainer" containerID="99c5b5e79f97b4d7884d6b8074b7800ee508ef96b0b846ef637fa476acee40bf" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.110883 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99c5b5e79f97b4d7884d6b8074b7800ee508ef96b0b846ef637fa476acee40bf\": container with ID starting with 99c5b5e79f97b4d7884d6b8074b7800ee508ef96b0b846ef637fa476acee40bf not found: ID does not exist" containerID="99c5b5e79f97b4d7884d6b8074b7800ee508ef96b0b846ef637fa476acee40bf" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.110915 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99c5b5e79f97b4d7884d6b8074b7800ee508ef96b0b846ef637fa476acee40bf"} err="failed to get container status \"99c5b5e79f97b4d7884d6b8074b7800ee508ef96b0b846ef637fa476acee40bf\": rpc error: code = NotFound desc = could not find container \"99c5b5e79f97b4d7884d6b8074b7800ee508ef96b0b846ef637fa476acee40bf\": container with ID starting with 99c5b5e79f97b4d7884d6b8074b7800ee508ef96b0b846ef637fa476acee40bf not found: ID does not exist" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.110935 5039 scope.go:117] "RemoveContainer" containerID="217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.113795 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce\": container with ID starting with 217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce not found: ID does not exist" containerID="217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.113818 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce"} err="failed to get container status \"217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce\": rpc error: code = NotFound desc = could not find container \"217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce\": container with ID starting with 217002ff091719552c8d06b17d4c3e4c703f21f513168039f866a8e88930b8ce not found: ID does not exist" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.113833 5039 scope.go:117] 
"RemoveContainer" containerID="7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.114302 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\": container with ID starting with 7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288 not found: ID does not exist" containerID="7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.114323 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288"} err="failed to get container status \"7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\": rpc error: code = NotFound desc = could not find container \"7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288\": container with ID starting with 7c8e6805dd2d4420e47e57259087f388008d572fa6fe8789835601c76a1b3288 not found: ID does not exist" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.114334 5039 scope.go:117] "RemoveContainer" containerID="86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.114741 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\": container with ID starting with 86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224 not found: ID does not exist" containerID="86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.114808 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224"} err="failed to get container status \"86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\": rpc error: code = NotFound desc = could not find container \"86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224\": container with ID starting with 86ad342ab4bfd5e07fc74cee230868da0968b8977a6ea53972e5bfd38200b224 not found: ID does not exist" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.114847 5039 scope.go:117] "RemoveContainer" containerID="95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.115117 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\": container with ID starting with 95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90 not found: ID does not exist" containerID="95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.115140 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90"} err="failed to get container status \"95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\": rpc error: code = NotFound desc = could not find container \"95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90\": container with ID starting with 
95ea1d7feee51434f1828ad7a0374871bdc444f5a2667a85745aad22c2f0eb90 not found: ID does not exist" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.115156 5039 scope.go:117] "RemoveContainer" containerID="045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.115375 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\": container with ID starting with 045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614 not found: ID does not exist" containerID="045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.115408 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614"} err="failed to get container status \"045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\": rpc error: code = NotFound desc = could not find container \"045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614\": container with ID starting with 045865c6afd41a290df8af29aa99bca2b9cc4c2ef020605f6c6326e9ee724614 not found: ID does not exist" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.115431 5039 scope.go:117] "RemoveContainer" containerID="7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.115701 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\": container with ID starting with 7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a not found: ID does not exist" containerID="7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.115729 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a"} err="failed to get container status \"7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\": rpc error: code = NotFound desc = could not find container \"7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a\": container with ID starting with 7a5301581f6408cfca9ae12797d6f440fa8163e206eb6438becf83d703484e2a not found: ID does not exist" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.115746 5039 scope.go:117] "RemoveContainer" containerID="2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.116018 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\": container with ID starting with 2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad not found: ID does not exist" containerID="2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.116064 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad"} err="failed to get container status \"2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\": rpc 
error: code = NotFound desc = could not find container \"2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad\": container with ID starting with 2fbbc5c05082f694adda5c57a16572a042a5f9dd78de7f32dc6c32bd52fe14ad not found: ID does not exist" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.116096 5039 scope.go:117] "RemoveContainer" containerID="d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.116403 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\": container with ID starting with d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb not found: ID does not exist" containerID="d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.116440 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb"} err="failed to get container status \"d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\": rpc error: code = NotFound desc = could not find container \"d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb\": container with ID starting with d8fa87a2c78fc4791b4e2059e76b27dbe321a5b10e7b85c80a46c8e2061730cb not found: ID does not exist" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.116459 5039 scope.go:117] "RemoveContainer" containerID="23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c" Nov 24 13:28:19 crc kubenswrapper[5039]: E1124 13:28:19.116714 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\": container with ID starting with 23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c not found: ID does not exist" containerID="23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.116748 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c"} err="failed to get container status \"23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\": rpc error: code = NotFound desc = could not find container \"23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c\": container with ID starting with 23c096b43593370b9cb51a46f15a6df186fa35e85553fe0c16c8ae07f9d08e3c not found: ID does not exist" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150172 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48zbm\" (UniqueName: \"kubernetes.io/projected/e46fcc9c-d5aa-456f-b440-9deed992d7c6-kube-api-access-48zbm\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150227 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-log-socket\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150254 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e46fcc9c-d5aa-456f-b440-9deed992d7c6-ovnkube-config\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150275 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-slash\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24
13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150298 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-etc-openvswitch\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150315 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-run-openvswitch\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150347 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-run-systemd\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150370 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e46fcc9c-d5aa-456f-b440-9deed992d7c6-env-overrides\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150366 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-log-socket\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150391 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/e46fcc9c-d5aa-456f-b440-9deed992d7c6-ovnkube-script-lib\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150539 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-node-log\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150575 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-cni-netd\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150614 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-run-ovn-kubernetes\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150615 5039 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-node-log\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150643 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-cni-bin\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150664 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-run-openvswitch\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150675 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-run-netns\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150694 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-slash\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150716 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-etc-openvswitch\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150710 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e46fcc9c-d5aa-456f-b440-9deed992d7c6-ovn-node-metrics-cert\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150739 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-run-systemd\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150751 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-kubelet\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150782 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: 
\"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-systemd-units\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150805 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150835 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-run-ovn\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150866 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-var-lib-openvswitch\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150959 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-var-lib-openvswitch\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.150999 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-cni-netd\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.151028 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-run-ovn-kubernetes\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.151057 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-cni-bin\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.151081 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-run-netns\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.151208 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/e46fcc9c-d5aa-456f-b440-9deed992d7c6-env-overrides\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.151243 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/e46fcc9c-d5aa-456f-b440-9deed992d7c6-ovnkube-script-lib\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.151268 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-kubelet\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.151246 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-systemd-units\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.151293 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.151279 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e46fcc9c-d5aa-456f-b440-9deed992d7c6-ovnkube-config\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.151323 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/e46fcc9c-d5aa-456f-b440-9deed992d7c6-run-ovn\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.156288 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e46fcc9c-d5aa-456f-b440-9deed992d7c6-ovn-node-metrics-cert\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.168784 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48zbm\" (UniqueName: \"kubernetes.io/projected/e46fcc9c-d5aa-456f-b440-9deed992d7c6-kube-api-access-48zbm\") pod \"ovnkube-node-8hz2p\" (UID: \"e46fcc9c-d5aa-456f-b440-9deed992d7c6\") " pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.220027 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-w2ctb"] Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.223446 5039 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-w2ctb"] Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.355362 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.898203 5039 generic.go:334] "Generic (PLEG): container finished" podID="e46fcc9c-d5aa-456f-b440-9deed992d7c6" containerID="443294443f09c225013b704fecdc1ba0ee085fc34600f8970a1de42564ee9b3d" exitCode=0 Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.898333 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" event={"ID":"e46fcc9c-d5aa-456f-b440-9deed992d7c6","Type":"ContainerDied","Data":"443294443f09c225013b704fecdc1ba0ee085fc34600f8970a1de42564ee9b3d"} Nov 24 13:28:19 crc kubenswrapper[5039]: I1124 13:28:19.898394 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" event={"ID":"e46fcc9c-d5aa-456f-b440-9deed992d7c6","Type":"ContainerStarted","Data":"4fc489bbd064608dbab24ab6804b18ecb3bd0fd18dc4b918539ac8715f7d60f9"} Nov 24 13:28:20 crc kubenswrapper[5039]: I1124 13:28:20.314004 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54c05b03-6747-47bf-a40d-8a9332c4d856" path="/var/lib/kubelet/pods/54c05b03-6747-47bf-a40d-8a9332c4d856/volumes" Nov 24 13:28:20 crc kubenswrapper[5039]: I1124 13:28:20.910696 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" event={"ID":"e46fcc9c-d5aa-456f-b440-9deed992d7c6","Type":"ContainerStarted","Data":"413217c155cc7fce558ee77316824e7326efea080a01f84fbcbc0eb2b1275507"} Nov 24 13:28:20 crc kubenswrapper[5039]: I1124 13:28:20.910944 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" event={"ID":"e46fcc9c-d5aa-456f-b440-9deed992d7c6","Type":"ContainerStarted","Data":"7a36438bfd51e4a58460aff7fd48c23014b186fdbe08b7708e669dc93c4cda28"} Nov 24 13:28:20 crc kubenswrapper[5039]: I1124 13:28:20.910954 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" event={"ID":"e46fcc9c-d5aa-456f-b440-9deed992d7c6","Type":"ContainerStarted","Data":"e27913f82654f2addb737d6a0aa8b401a9de4d698a1744fd93db3253715706fc"} Nov 24 13:28:20 crc kubenswrapper[5039]: I1124 13:28:20.910963 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" event={"ID":"e46fcc9c-d5aa-456f-b440-9deed992d7c6","Type":"ContainerStarted","Data":"2b15e9de0775c3eb19d8f14f15b813418307f6020afe008fc0c9210d7c1b1d40"} Nov 24 13:28:20 crc kubenswrapper[5039]: I1124 13:28:20.910975 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" event={"ID":"e46fcc9c-d5aa-456f-b440-9deed992d7c6","Type":"ContainerStarted","Data":"40a2f2b0a26799ab654a01b76816c51df476e1f606b97e349b93911f5ad29413"} Nov 24 13:28:20 crc kubenswrapper[5039]: I1124 13:28:20.910983 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" event={"ID":"e46fcc9c-d5aa-456f-b440-9deed992d7c6","Type":"ContainerStarted","Data":"b05da9a081eff8ef79e7c76597275c785fb0a347256a90774a2cf5d17fc42971"} Nov 24 13:28:22 crc kubenswrapper[5039]: I1124 13:28:22.922776 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" 
event={"ID":"e46fcc9c-d5aa-456f-b440-9deed992d7c6","Type":"ContainerStarted","Data":"aeb1d51c393d0612e834cdf3dfa3ecdb3509981d69f3fe039ba0863d67e2df76"} Nov 24 13:28:24 crc kubenswrapper[5039]: I1124 13:28:24.867353 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn"] Nov 24 13:28:24 crc kubenswrapper[5039]: I1124 13:28:24.868427 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" Nov 24 13:28:24 crc kubenswrapper[5039]: I1124 13:28:24.871948 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Nov 24 13:28:24 crc kubenswrapper[5039]: I1124 13:28:24.872546 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-7hdfv" Nov 24 13:28:24 crc kubenswrapper[5039]: I1124 13:28:24.872742 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Nov 24 13:28:24 crc kubenswrapper[5039]: I1124 13:28:24.919225 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq"] Nov 24 13:28:24 crc kubenswrapper[5039]: I1124 13:28:24.920136 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" Nov 24 13:28:24 crc kubenswrapper[5039]: I1124 13:28:24.922668 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-tn86d" Nov 24 13:28:24 crc kubenswrapper[5039]: I1124 13:28:24.922837 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Nov 24 13:28:24 crc kubenswrapper[5039]: I1124 13:28:24.931428 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w"] Nov 24 13:28:24 crc kubenswrapper[5039]: I1124 13:28:24.932837 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.005112 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-6t44d"] Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.005805 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.009175 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.009237 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-xh694" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.018066 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7lkhq\" (UniqueName: \"kubernetes.io/projected/dcdfb73e-765a-4fba-bdcb-0ca1cd215211-kube-api-access-7lkhq\") pod \"observability-operator-d8bb48f5d-6t44d\" (UID: \"dcdfb73e-765a-4fba-bdcb-0ca1cd215211\") " pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.018105 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/dcdfb73e-765a-4fba-bdcb-0ca1cd215211-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-6t44d\" (UID: \"dcdfb73e-765a-4fba-bdcb-0ca1cd215211\") " pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.018124 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s58vp\" (UniqueName: \"kubernetes.io/projected/21190c12-076c-4263-a68d-6dc4117e1d10-kube-api-access-s58vp\") pod \"obo-prometheus-operator-668cf9dfbb-dnvjn\" (UID: \"21190c12-076c-4263-a68d-6dc4117e1d10\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.018156 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/79c1d6a0-9ed7-48c8-8a09-e4695a89d953-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w\" (UID: \"79c1d6a0-9ed7-48c8-8a09-e4695a89d953\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.018181 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq\" (UID: \"eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.018201 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq\" (UID: \"eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.018222 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/79c1d6a0-9ed7-48c8-8a09-e4695a89d953-apiservice-cert\") pod 
\"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w\" (UID: \"79c1d6a0-9ed7-48c8-8a09-e4695a89d953\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.107199 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-kkzn2"] Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.107863 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-kkzn2" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.114022 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-vl6tj" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.118922 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq\" (UID: \"eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.118958 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/79c1d6a0-9ed7-48c8-8a09-e4695a89d953-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w\" (UID: \"79c1d6a0-9ed7-48c8-8a09-e4695a89d953\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.118989 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7lkhq\" (UniqueName: \"kubernetes.io/projected/dcdfb73e-765a-4fba-bdcb-0ca1cd215211-kube-api-access-7lkhq\") pod \"observability-operator-d8bb48f5d-6t44d\" (UID: \"dcdfb73e-765a-4fba-bdcb-0ca1cd215211\") " pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.119009 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/dcdfb73e-765a-4fba-bdcb-0ca1cd215211-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-6t44d\" (UID: \"dcdfb73e-765a-4fba-bdcb-0ca1cd215211\") " pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.119027 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s58vp\" (UniqueName: \"kubernetes.io/projected/21190c12-076c-4263-a68d-6dc4117e1d10-kube-api-access-s58vp\") pod \"obo-prometheus-operator-668cf9dfbb-dnvjn\" (UID: \"21190c12-076c-4263-a68d-6dc4117e1d10\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.119066 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b-openshift-service-ca\") pod \"perses-operator-5446b9c989-kkzn2\" (UID: \"f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b\") " pod="openshift-operators/perses-operator-5446b9c989-kkzn2" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.119097 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/79c1d6a0-9ed7-48c8-8a09-e4695a89d953-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w\" (UID: \"79c1d6a0-9ed7-48c8-8a09-e4695a89d953\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.119127 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq\" (UID: \"eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.119147 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dwhg\" (UniqueName: \"kubernetes.io/projected/f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b-kube-api-access-7dwhg\") pod \"perses-operator-5446b9c989-kkzn2\" (UID: \"f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b\") " pod="openshift-operators/perses-operator-5446b9c989-kkzn2" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.124247 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/79c1d6a0-9ed7-48c8-8a09-e4695a89d953-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w\" (UID: \"79c1d6a0-9ed7-48c8-8a09-e4695a89d953\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.124756 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq\" (UID: \"eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.126733 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq\" (UID: \"eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.131848 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/dcdfb73e-765a-4fba-bdcb-0ca1cd215211-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-6t44d\" (UID: \"dcdfb73e-765a-4fba-bdcb-0ca1cd215211\") " pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.135096 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/79c1d6a0-9ed7-48c8-8a09-e4695a89d953-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w\" (UID: \"79c1d6a0-9ed7-48c8-8a09-e4695a89d953\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.137824 5039 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-s58vp\" (UniqueName: \"kubernetes.io/projected/21190c12-076c-4263-a68d-6dc4117e1d10-kube-api-access-s58vp\") pod \"obo-prometheus-operator-668cf9dfbb-dnvjn\" (UID: \"21190c12-076c-4263-a68d-6dc4117e1d10\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.143145 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7lkhq\" (UniqueName: \"kubernetes.io/projected/dcdfb73e-765a-4fba-bdcb-0ca1cd215211-kube-api-access-7lkhq\") pod \"observability-operator-d8bb48f5d-6t44d\" (UID: \"dcdfb73e-765a-4fba-bdcb-0ca1cd215211\") " pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.217606 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.219892 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b-openshift-service-ca\") pod \"perses-operator-5446b9c989-kkzn2\" (UID: \"f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b\") " pod="openshift-operators/perses-operator-5446b9c989-kkzn2" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.219943 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dwhg\" (UniqueName: \"kubernetes.io/projected/f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b-kube-api-access-7dwhg\") pod \"perses-operator-5446b9c989-kkzn2\" (UID: \"f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b\") " pod="openshift-operators/perses-operator-5446b9c989-kkzn2" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.220881 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b-openshift-service-ca\") pod \"perses-operator-5446b9c989-kkzn2\" (UID: \"f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b\") " pod="openshift-operators/perses-operator-5446b9c989-kkzn2" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.236278 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dwhg\" (UniqueName: \"kubernetes.io/projected/f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b-kube-api-access-7dwhg\") pod \"perses-operator-5446b9c989-kkzn2\" (UID: \"f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b\") " pod="openshift-operators/perses-operator-5446b9c989-kkzn2" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.240783 5039 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-dnvjn_openshift-operators_21190c12-076c-4263-a68d-6dc4117e1d10_0(476d4f7dda8aefed3c18b32d50ffb8c4dd471c5bb479307224d971650042ac91): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.240837 5039 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-dnvjn_openshift-operators_21190c12-076c-4263-a68d-6dc4117e1d10_0(476d4f7dda8aefed3c18b32d50ffb8c4dd471c5bb479307224d971650042ac91): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.240859 5039 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-dnvjn_openshift-operators_21190c12-076c-4263-a68d-6dc4117e1d10_0(476d4f7dda8aefed3c18b32d50ffb8c4dd471c5bb479307224d971650042ac91): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.240913 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-dnvjn_openshift-operators(21190c12-076c-4263-a68d-6dc4117e1d10)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-dnvjn_openshift-operators(21190c12-076c-4263-a68d-6dc4117e1d10)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-dnvjn_openshift-operators_21190c12-076c-4263-a68d-6dc4117e1d10_0(476d4f7dda8aefed3c18b32d50ffb8c4dd471c5bb479307224d971650042ac91): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" podUID="21190c12-076c-4263-a68d-6dc4117e1d10" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.265548 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.280546 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.299812 5039 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq_openshift-operators_eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e_0(bb60fec68b061906c618eb179cbd133f23042b69d21d5f7d2a039ff6df0adc1e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.299888 5039 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq_openshift-operators_eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e_0(bb60fec68b061906c618eb179cbd133f23042b69d21d5f7d2a039ff6df0adc1e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.299914 5039 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq_openshift-operators_eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e_0(bb60fec68b061906c618eb179cbd133f23042b69d21d5f7d2a039ff6df0adc1e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.299996 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq_openshift-operators(eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq_openshift-operators(eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq_openshift-operators_eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e_0(bb60fec68b061906c618eb179cbd133f23042b69d21d5f7d2a039ff6df0adc1e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" podUID="eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.307968 5039 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w_openshift-operators_79c1d6a0-9ed7-48c8-8a09-e4695a89d953_0(d9dc2eec493664fc6ac367c2ae13a4d2a9f2850d51b77ee9f7b9a9536d174a25): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.308016 5039 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w_openshift-operators_79c1d6a0-9ed7-48c8-8a09-e4695a89d953_0(d9dc2eec493664fc6ac367c2ae13a4d2a9f2850d51b77ee9f7b9a9536d174a25): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.308035 5039 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w_openshift-operators_79c1d6a0-9ed7-48c8-8a09-e4695a89d953_0(d9dc2eec493664fc6ac367c2ae13a4d2a9f2850d51b77ee9f7b9a9536d174a25): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.308067 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w_openshift-operators(79c1d6a0-9ed7-48c8-8a09-e4695a89d953)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w_openshift-operators(79c1d6a0-9ed7-48c8-8a09-e4695a89d953)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w_openshift-operators_79c1d6a0-9ed7-48c8-8a09-e4695a89d953_0(d9dc2eec493664fc6ac367c2ae13a4d2a9f2850d51b77ee9f7b9a9536d174a25): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" podUID="79c1d6a0-9ed7-48c8-8a09-e4695a89d953" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.318327 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.339060 5039 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6t44d_openshift-operators_dcdfb73e-765a-4fba-bdcb-0ca1cd215211_0(8e41a39a765cef7e90a9dec3fe71e9054b13cfb7f4f32bb5fbbe01a1adb852cb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.339139 5039 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6t44d_openshift-operators_dcdfb73e-765a-4fba-bdcb-0ca1cd215211_0(8e41a39a765cef7e90a9dec3fe71e9054b13cfb7f4f32bb5fbbe01a1adb852cb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.339175 5039 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6t44d_openshift-operators_dcdfb73e-765a-4fba-bdcb-0ca1cd215211_0(8e41a39a765cef7e90a9dec3fe71e9054b13cfb7f4f32bb5fbbe01a1adb852cb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.339222 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-6t44d_openshift-operators(dcdfb73e-765a-4fba-bdcb-0ca1cd215211)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-6t44d_openshift-operators(dcdfb73e-765a-4fba-bdcb-0ca1cd215211)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6t44d_openshift-operators_dcdfb73e-765a-4fba-bdcb-0ca1cd215211_0(8e41a39a765cef7e90a9dec3fe71e9054b13cfb7f4f32bb5fbbe01a1adb852cb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" podUID="dcdfb73e-765a-4fba-bdcb-0ca1cd215211" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.423876 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-kkzn2" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.452192 5039 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-kkzn2_openshift-operators_f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b_0(2334b14b78f914c675b1b17da572cfd536596bb66afdc56cd3b1d8fe5a213b75): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.452262 5039 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-kkzn2_openshift-operators_f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b_0(2334b14b78f914c675b1b17da572cfd536596bb66afdc56cd3b1d8fe5a213b75): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-kkzn2" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.452285 5039 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-kkzn2_openshift-operators_f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b_0(2334b14b78f914c675b1b17da572cfd536596bb66afdc56cd3b1d8fe5a213b75): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-kkzn2" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.452330 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-kkzn2_openshift-operators(f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-kkzn2_openshift-operators(f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-kkzn2_openshift-operators_f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b_0(2334b14b78f914c675b1b17da572cfd536596bb66afdc56cd3b1d8fe5a213b75): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-kkzn2" podUID="f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.874475 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq"] Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.882085 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn"] Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.894609 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-kkzn2"] Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.908633 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w"] Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.911257 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-6t44d"] Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.943335 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.943699 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.954682 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" event={"ID":"e46fcc9c-d5aa-456f-b440-9deed992d7c6","Type":"ContainerStarted","Data":"dc19bca0b7c04697ccc4b29f4c8029ed5625995ed00250e3543736b9d093a07f"} Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.954768 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-kkzn2" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.954990 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-kkzn2" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.962277 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.962658 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.963883 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.964291 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.964518 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.964850 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.965109 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.965136 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:25 crc kubenswrapper[5039]: I1124 13:28:25.965145 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.974103 5039 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6t44d_openshift-operators_dcdfb73e-765a-4fba-bdcb-0ca1cd215211_0(4b0001fa81abe7f35bbd84726f1e167e333f0ae863aa87f861bd18f94969eeef): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.974172 5039 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6t44d_openshift-operators_dcdfb73e-765a-4fba-bdcb-0ca1cd215211_0(4b0001fa81abe7f35bbd84726f1e167e333f0ae863aa87f861bd18f94969eeef): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.974201 5039 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6t44d_openshift-operators_dcdfb73e-765a-4fba-bdcb-0ca1cd215211_0(4b0001fa81abe7f35bbd84726f1e167e333f0ae863aa87f861bd18f94969eeef): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.974256 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-6t44d_openshift-operators(dcdfb73e-765a-4fba-bdcb-0ca1cd215211)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-6t44d_openshift-operators(dcdfb73e-765a-4fba-bdcb-0ca1cd215211)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6t44d_openshift-operators_dcdfb73e-765a-4fba-bdcb-0ca1cd215211_0(4b0001fa81abe7f35bbd84726f1e167e333f0ae863aa87f861bd18f94969eeef): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" podUID="dcdfb73e-765a-4fba-bdcb-0ca1cd215211" Nov 24 13:28:25 crc kubenswrapper[5039]: E1124 13:28:25.988255 5039 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf60bd1ab_ddc1_462f_85f9_e47d7305727d.slice/crio-48f3d375d73737b4f7b7da5d663cb06af34d5569a82a0b855a97dc97476e9731.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf60bd1ab_ddc1_462f_85f9_e47d7305727d.slice/crio-conmon-48f3d375d73737b4f7b7da5d663cb06af34d5569a82a0b855a97dc97476e9731.scope\": RecentStats: unable to find data in memory cache]" Nov 24 13:28:26 crc kubenswrapper[5039]: I1124 13:28:26.013622 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" podStartSLOduration=8.013598813 podStartE2EDuration="8.013598813s" podCreationTimestamp="2025-11-24 13:28:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:28:25.994067864 +0000 UTC m=+618.433192364" watchObservedRunningTime="2025-11-24 13:28:26.013598813 +0000 UTC m=+618.452723313" Nov 24 13:28:26 crc kubenswrapper[5039]: E1124 13:28:26.041096 5039 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-kkzn2_openshift-operators_f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b_0(019b4f4333d2c9fe2d0fc9aa3f3fa71d8b7f6a18bf973019872cf07ee212ab72): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Nov 24 13:28:26 crc kubenswrapper[5039]: E1124 13:28:26.041173 5039 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-kkzn2_openshift-operators_f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b_0(019b4f4333d2c9fe2d0fc9aa3f3fa71d8b7f6a18bf973019872cf07ee212ab72): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-kkzn2" Nov 24 13:28:26 crc kubenswrapper[5039]: E1124 13:28:26.041200 5039 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-kkzn2_openshift-operators_f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b_0(019b4f4333d2c9fe2d0fc9aa3f3fa71d8b7f6a18bf973019872cf07ee212ab72): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-kkzn2" Nov 24 13:28:26 crc kubenswrapper[5039]: E1124 13:28:26.041251 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-kkzn2_openshift-operators(f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-kkzn2_openshift-operators(f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-kkzn2_openshift-operators_f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b_0(019b4f4333d2c9fe2d0fc9aa3f3fa71d8b7f6a18bf973019872cf07ee212ab72): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-kkzn2" podUID="f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b" Nov 24 13:28:26 crc kubenswrapper[5039]: I1124 13:28:26.041686 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:26 crc kubenswrapper[5039]: I1124 13:28:26.041959 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p" Nov 24 13:28:26 crc kubenswrapper[5039]: E1124 13:28:26.063685 5039 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w_openshift-operators_79c1d6a0-9ed7-48c8-8a09-e4695a89d953_0(bfc2d2349618caad98654c2fe3b325adfae651c2e599fae6c5f32240f0c4c424): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 13:28:26 crc kubenswrapper[5039]: E1124 13:28:26.063750 5039 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w_openshift-operators_79c1d6a0-9ed7-48c8-8a09-e4695a89d953_0(bfc2d2349618caad98654c2fe3b325adfae651c2e599fae6c5f32240f0c4c424): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" Nov 24 13:28:26 crc kubenswrapper[5039]: E1124 13:28:26.063768 5039 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w_openshift-operators_79c1d6a0-9ed7-48c8-8a09-e4695a89d953_0(bfc2d2349618caad98654c2fe3b325adfae651c2e599fae6c5f32240f0c4c424): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" Nov 24 13:28:26 crc kubenswrapper[5039]: E1124 13:28:26.063819 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w_openshift-operators(79c1d6a0-9ed7-48c8-8a09-e4695a89d953)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w_openshift-operators(79c1d6a0-9ed7-48c8-8a09-e4695a89d953)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w_openshift-operators_79c1d6a0-9ed7-48c8-8a09-e4695a89d953_0(bfc2d2349618caad98654c2fe3b325adfae651c2e599fae6c5f32240f0c4c424): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" podUID="79c1d6a0-9ed7-48c8-8a09-e4695a89d953" Nov 24 13:28:26 crc kubenswrapper[5039]: E1124 13:28:26.065663 5039 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq_openshift-operators_eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e_0(85d80bd17c6eb365321be3bae20b7fa0196f480a285a53386b8460171046e217): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 13:28:26 crc kubenswrapper[5039]: E1124 13:28:26.065720 5039 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq_openshift-operators_eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e_0(85d80bd17c6eb365321be3bae20b7fa0196f480a285a53386b8460171046e217): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" Nov 24 13:28:26 crc kubenswrapper[5039]: E1124 13:28:26.065744 5039 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq_openshift-operators_eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e_0(85d80bd17c6eb365321be3bae20b7fa0196f480a285a53386b8460171046e217): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" Nov 24 13:28:26 crc kubenswrapper[5039]: E1124 13:28:26.065782 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq_openshift-operators(eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq_openshift-operators(eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq_openshift-operators_eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e_0(85d80bd17c6eb365321be3bae20b7fa0196f480a285a53386b8460171046e217): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" podUID="eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e" Nov 24 13:28:26 crc kubenswrapper[5039]: E1124 13:28:26.082828 5039 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-dnvjn_openshift-operators_21190c12-076c-4263-a68d-6dc4117e1d10_0(f528db70e5c67b8434a0a945c78f2ac90d4fc1f05a20997c19703b5466cb5f6e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 13:28:26 crc kubenswrapper[5039]: E1124 13:28:26.082885 5039 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-dnvjn_openshift-operators_21190c12-076c-4263-a68d-6dc4117e1d10_0(f528db70e5c67b8434a0a945c78f2ac90d4fc1f05a20997c19703b5466cb5f6e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" Nov 24 13:28:26 crc kubenswrapper[5039]: E1124 13:28:26.082906 5039 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-dnvjn_openshift-operators_21190c12-076c-4263-a68d-6dc4117e1d10_0(f528db70e5c67b8434a0a945c78f2ac90d4fc1f05a20997c19703b5466cb5f6e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" Nov 24 13:28:26 crc kubenswrapper[5039]: E1124 13:28:26.082942 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-dnvjn_openshift-operators(21190c12-076c-4263-a68d-6dc4117e1d10)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-dnvjn_openshift-operators(21190c12-076c-4263-a68d-6dc4117e1d10)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-dnvjn_openshift-operators_21190c12-076c-4263-a68d-6dc4117e1d10_0(f528db70e5c67b8434a0a945c78f2ac90d4fc1f05a20997c19703b5466cb5f6e): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" podUID="21190c12-076c-4263-a68d-6dc4117e1d10" Nov 24 13:28:34 crc kubenswrapper[5039]: I1124 13:28:34.307089 5039 scope.go:117] "RemoveContainer" containerID="afbc25e2b688679dbfe2c40bce4636e6482ec1605d671d0aa8e10a779e2f545a" Nov 24 13:28:34 crc kubenswrapper[5039]: E1124 13:28:34.307664 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-kr94g_openshift-multus(6c18c830-d513-4df0-be92-cd44f2d2c5df)\"" pod="openshift-multus/multus-kr94g" podUID="6c18c830-d513-4df0-be92-cd44f2d2c5df" Nov 24 13:28:36 crc kubenswrapper[5039]: E1124 13:28:36.116391 5039 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf60bd1ab_ddc1_462f_85f9_e47d7305727d.slice/crio-conmon-48f3d375d73737b4f7b7da5d663cb06af34d5569a82a0b855a97dc97476e9731.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf60bd1ab_ddc1_462f_85f9_e47d7305727d.slice/crio-48f3d375d73737b4f7b7da5d663cb06af34d5569a82a0b855a97dc97476e9731.scope\": RecentStats: unable to find data in memory cache]" Nov 24 13:28:36 crc kubenswrapper[5039]: I1124 13:28:36.306665 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" Nov 24 13:28:36 crc kubenswrapper[5039]: I1124 13:28:36.307396 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" Nov 24 13:28:36 crc kubenswrapper[5039]: E1124 13:28:36.336611 5039 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6t44d_openshift-operators_dcdfb73e-765a-4fba-bdcb-0ca1cd215211_0(d9a648001ba0d744e68403feb4183ed07cb3a1494c5574e019d14399a94ecfe7): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 13:28:36 crc kubenswrapper[5039]: E1124 13:28:36.336702 5039 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6t44d_openshift-operators_dcdfb73e-765a-4fba-bdcb-0ca1cd215211_0(d9a648001ba0d744e68403feb4183ed07cb3a1494c5574e019d14399a94ecfe7): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" Nov 24 13:28:36 crc kubenswrapper[5039]: E1124 13:28:36.336728 5039 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6t44d_openshift-operators_dcdfb73e-765a-4fba-bdcb-0ca1cd215211_0(d9a648001ba0d744e68403feb4183ed07cb3a1494c5574e019d14399a94ecfe7): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" Nov 24 13:28:36 crc kubenswrapper[5039]: E1124 13:28:36.336787 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-6t44d_openshift-operators(dcdfb73e-765a-4fba-bdcb-0ca1cd215211)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-6t44d_openshift-operators(dcdfb73e-765a-4fba-bdcb-0ca1cd215211)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6t44d_openshift-operators_dcdfb73e-765a-4fba-bdcb-0ca1cd215211_0(d9a648001ba0d744e68403feb4183ed07cb3a1494c5574e019d14399a94ecfe7): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" podUID="dcdfb73e-765a-4fba-bdcb-0ca1cd215211" Nov 24 13:28:38 crc kubenswrapper[5039]: I1124 13:28:38.306207 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-kkzn2" Nov 24 13:28:38 crc kubenswrapper[5039]: I1124 13:28:38.309933 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-kkzn2" Nov 24 13:28:38 crc kubenswrapper[5039]: E1124 13:28:38.343994 5039 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-kkzn2_openshift-operators_f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b_0(e5dc4f051fc0f1cb9f5bea87e8ee0d0b44f194c748c906760e1fe7029c020b42): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 13:28:38 crc kubenswrapper[5039]: E1124 13:28:38.344072 5039 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-kkzn2_openshift-operators_f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b_0(e5dc4f051fc0f1cb9f5bea87e8ee0d0b44f194c748c906760e1fe7029c020b42): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-kkzn2" Nov 24 13:28:38 crc kubenswrapper[5039]: E1124 13:28:38.344098 5039 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-kkzn2_openshift-operators_f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b_0(e5dc4f051fc0f1cb9f5bea87e8ee0d0b44f194c748c906760e1fe7029c020b42): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-kkzn2" Nov 24 13:28:38 crc kubenswrapper[5039]: E1124 13:28:38.344154 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-kkzn2_openshift-operators(f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-kkzn2_openshift-operators(f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-kkzn2_openshift-operators_f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b_0(e5dc4f051fc0f1cb9f5bea87e8ee0d0b44f194c748c906760e1fe7029c020b42): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-kkzn2" podUID="f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b" Nov 24 13:28:39 crc kubenswrapper[5039]: I1124 13:28:39.305910 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" Nov 24 13:28:39 crc kubenswrapper[5039]: I1124 13:28:39.305988 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" Nov 24 13:28:39 crc kubenswrapper[5039]: I1124 13:28:39.306799 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" Nov 24 13:28:39 crc kubenswrapper[5039]: I1124 13:28:39.306805 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" Nov 24 13:28:39 crc kubenswrapper[5039]: E1124 13:28:39.348966 5039 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-dnvjn_openshift-operators_21190c12-076c-4263-a68d-6dc4117e1d10_0(7134512d7d789fb514fb6a3dde5f9df4f8d1288d826b43fc7e248bd557fe376e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 13:28:39 crc kubenswrapper[5039]: E1124 13:28:39.349046 5039 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-dnvjn_openshift-operators_21190c12-076c-4263-a68d-6dc4117e1d10_0(7134512d7d789fb514fb6a3dde5f9df4f8d1288d826b43fc7e248bd557fe376e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" Nov 24 13:28:39 crc kubenswrapper[5039]: E1124 13:28:39.349075 5039 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-dnvjn_openshift-operators_21190c12-076c-4263-a68d-6dc4117e1d10_0(7134512d7d789fb514fb6a3dde5f9df4f8d1288d826b43fc7e248bd557fe376e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" Nov 24 13:28:39 crc kubenswrapper[5039]: E1124 13:28:39.349144 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-dnvjn_openshift-operators(21190c12-076c-4263-a68d-6dc4117e1d10)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-dnvjn_openshift-operators(21190c12-076c-4263-a68d-6dc4117e1d10)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-dnvjn_openshift-operators_21190c12-076c-4263-a68d-6dc4117e1d10_0(7134512d7d789fb514fb6a3dde5f9df4f8d1288d826b43fc7e248bd557fe376e): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" podUID="21190c12-076c-4263-a68d-6dc4117e1d10" Nov 24 13:28:39 crc kubenswrapper[5039]: E1124 13:28:39.382195 5039 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq_openshift-operators_eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e_0(492ddc293279fc738bbbd20b187948385e636dcffecf68c41b97978486f52678): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 13:28:39 crc kubenswrapper[5039]: E1124 13:28:39.382258 5039 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq_openshift-operators_eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e_0(492ddc293279fc738bbbd20b187948385e636dcffecf68c41b97978486f52678): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" Nov 24 13:28:39 crc kubenswrapper[5039]: E1124 13:28:39.382280 5039 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq_openshift-operators_eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e_0(492ddc293279fc738bbbd20b187948385e636dcffecf68c41b97978486f52678): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" Nov 24 13:28:39 crc kubenswrapper[5039]: E1124 13:28:39.382336 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq_openshift-operators(eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq_openshift-operators(eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq_openshift-operators_eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e_0(492ddc293279fc738bbbd20b187948385e636dcffecf68c41b97978486f52678): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" podUID="eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e" Nov 24 13:28:40 crc kubenswrapper[5039]: I1124 13:28:40.305901 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" Nov 24 13:28:40 crc kubenswrapper[5039]: I1124 13:28:40.306381 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" Nov 24 13:28:40 crc kubenswrapper[5039]: E1124 13:28:40.333483 5039 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w_openshift-operators_79c1d6a0-9ed7-48c8-8a09-e4695a89d953_0(54dffcc2140857e16590f60df3d3ba0447518461728370f709a958860749d4fb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 13:28:40 crc kubenswrapper[5039]: E1124 13:28:40.333568 5039 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w_openshift-operators_79c1d6a0-9ed7-48c8-8a09-e4695a89d953_0(54dffcc2140857e16590f60df3d3ba0447518461728370f709a958860749d4fb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" Nov 24 13:28:40 crc kubenswrapper[5039]: E1124 13:28:40.333590 5039 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w_openshift-operators_79c1d6a0-9ed7-48c8-8a09-e4695a89d953_0(54dffcc2140857e16590f60df3d3ba0447518461728370f709a958860749d4fb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" Nov 24 13:28:40 crc kubenswrapper[5039]: E1124 13:28:40.333644 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w_openshift-operators(79c1d6a0-9ed7-48c8-8a09-e4695a89d953)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w_openshift-operators(79c1d6a0-9ed7-48c8-8a09-e4695a89d953)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w_openshift-operators_79c1d6a0-9ed7-48c8-8a09-e4695a89d953_0(54dffcc2140857e16590f60df3d3ba0447518461728370f709a958860749d4fb): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" podUID="79c1d6a0-9ed7-48c8-8a09-e4695a89d953"
Nov 24 13:28:45 crc kubenswrapper[5039]: I1124 13:28:45.306927 5039 scope.go:117] "RemoveContainer" containerID="afbc25e2b688679dbfe2c40bce4636e6482ec1605d671d0aa8e10a779e2f545a"
Nov 24 13:28:46 crc kubenswrapper[5039]: I1124 13:28:46.033613 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kr94g_6c18c830-d513-4df0-be92-cd44f2d2c5df/kube-multus/2.log"
Nov 24 13:28:46 crc kubenswrapper[5039]: I1124 13:28:46.034142 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kr94g_6c18c830-d513-4df0-be92-cd44f2d2c5df/kube-multus/1.log"
Nov 24 13:28:46 crc kubenswrapper[5039]: I1124 13:28:46.034194 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kr94g" event={"ID":"6c18c830-d513-4df0-be92-cd44f2d2c5df","Type":"ContainerStarted","Data":"788df3677b5e4e526240ce58b91f877de9b16111d31d105469d2c202949b5942"}
Nov 24 13:28:46 crc kubenswrapper[5039]: E1124 13:28:46.225757 5039 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf60bd1ab_ddc1_462f_85f9_e47d7305727d.slice/crio-conmon-48f3d375d73737b4f7b7da5d663cb06af34d5569a82a0b855a97dc97476e9731.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf60bd1ab_ddc1_462f_85f9_e47d7305727d.slice/crio-48f3d375d73737b4f7b7da5d663cb06af34d5569a82a0b855a97dc97476e9731.scope\": RecentStats: unable to find data in memory cache]"
Nov 24 13:28:47 crc kubenswrapper[5039]: I1124 13:28:47.306358 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d"
Nov 24 13:28:47 crc kubenswrapper[5039]: I1124 13:28:47.307492 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d"
Nov 24 13:28:47 crc kubenswrapper[5039]: I1124 13:28:47.723110 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-6t44d"]
Nov 24 13:28:48 crc kubenswrapper[5039]: I1124 13:28:48.048423 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" event={"ID":"dcdfb73e-765a-4fba-bdcb-0ca1cd215211","Type":"ContainerStarted","Data":"f0bbc27e40364295f1822dbca4cab2a9633789913d2c3b0aaade867ebb9d6c23"}
Nov 24 13:28:49 crc kubenswrapper[5039]: I1124 13:28:49.380562 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-8hz2p"
Nov 24 13:28:51 crc kubenswrapper[5039]: I1124 13:28:51.306119 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-kkzn2"
Nov 24 13:28:51 crc kubenswrapper[5039]: I1124 13:28:51.306926 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-kkzn2"
Nov 24 13:28:53 crc kubenswrapper[5039]: I1124 13:28:53.306229 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w"
Nov 24 13:28:53 crc kubenswrapper[5039]: I1124 13:28:53.306982 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w"
Nov 24 13:28:54 crc kubenswrapper[5039]: I1124 13:28:54.305737 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn"
Nov 24 13:28:54 crc kubenswrapper[5039]: I1124 13:28:54.305734 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq"
Nov 24 13:28:54 crc kubenswrapper[5039]: I1124 13:28:54.306534 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq"
Nov 24 13:28:54 crc kubenswrapper[5039]: I1124 13:28:54.306541 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn"
Nov 24 13:28:55 crc kubenswrapper[5039]: I1124 13:28:55.577626 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn"]
Nov 24 13:28:55 crc kubenswrapper[5039]: W1124 13:28:55.582863 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod21190c12_076c_4263_a68d_6dc4117e1d10.slice/crio-017efe5d80d877ed9ec1a0c52b7eb4340d1ab17914920a5acd7a6f0ee6db57e2 WatchSource:0}: Error finding container 017efe5d80d877ed9ec1a0c52b7eb4340d1ab17914920a5acd7a6f0ee6db57e2: Status 404 returned error can't find the container with id 017efe5d80d877ed9ec1a0c52b7eb4340d1ab17914920a5acd7a6f0ee6db57e2
Nov 24 13:28:55 crc kubenswrapper[5039]: I1124 13:28:55.677832 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq"]
Nov 24 13:28:55 crc kubenswrapper[5039]: I1124 13:28:55.681302 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w"]
Nov 24 13:28:55 crc kubenswrapper[5039]: I1124 13:28:55.683852 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-kkzn2"]
Nov 24 13:28:55 crc kubenswrapper[5039]: W1124 13:28:55.696466 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf6b4ca0c_993f_4e33_b14a_ff3c6c12ef7b.slice/crio-6a2ca48bacabb4c92c73378f9c1818cfaa00e00b4a2cd1ff21596192fb3aa915 WatchSource:0}: Error finding container 6a2ca48bacabb4c92c73378f9c1818cfaa00e00b4a2cd1ff21596192fb3aa915: Status 404 returned error can't find the container with id 6a2ca48bacabb4c92c73378f9c1818cfaa00e00b4a2cd1ff21596192fb3aa915
Nov 24 13:28:55 crc kubenswrapper[5039]: W1124 13:28:55.699874 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod79c1d6a0_9ed7_48c8_8a09_e4695a89d953.slice/crio-7e113402590ff07c3ffe03b53b3481b9d5eddafc2861ed13d4fa97b9b4b4ed98 WatchSource:0}: Error finding container 7e113402590ff07c3ffe03b53b3481b9d5eddafc2861ed13d4fa97b9b4b4ed98: Status 404 returned error can't find the container with id 7e113402590ff07c3ffe03b53b3481b9d5eddafc2861ed13d4fa97b9b4b4ed98
Nov 24 13:28:56 crc kubenswrapper[5039]: I1124 13:28:56.102597 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" event={"ID":"21190c12-076c-4263-a68d-6dc4117e1d10","Type":"ContainerStarted","Data":"017efe5d80d877ed9ec1a0c52b7eb4340d1ab17914920a5acd7a6f0ee6db57e2"}
Nov 24 13:28:56 crc kubenswrapper[5039]: I1124 13:28:56.103730 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" event={"ID":"dcdfb73e-765a-4fba-bdcb-0ca1cd215211","Type":"ContainerStarted","Data":"163a03062766e51610eeec6bd28591aaedd288f072fe8a8a0d51689bd977b4ba"}
Nov 24 13:28:56 crc kubenswrapper[5039]: I1124 13:28:56.103940 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d"
Nov 24 13:28:56 crc kubenswrapper[5039]: I1124 13:28:56.105149 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-kkzn2" event={"ID":"f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b","Type":"ContainerStarted","Data":"6a2ca48bacabb4c92c73378f9c1818cfaa00e00b4a2cd1ff21596192fb3aa915"}
Nov 24 13:28:56 crc kubenswrapper[5039]: I1124 13:28:56.106347 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" event={"ID":"79c1d6a0-9ed7-48c8-8a09-e4695a89d953","Type":"ContainerStarted","Data":"7e113402590ff07c3ffe03b53b3481b9d5eddafc2861ed13d4fa97b9b4b4ed98"}
Nov 24 13:28:56 crc kubenswrapper[5039]: I1124 13:28:56.107561 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" event={"ID":"eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e","Type":"ContainerStarted","Data":"40fd8d0213d80294cd279e6ae026fa52a4a5ba95b8dbbf29b3c2e04e9c83a219"}
Nov 24 13:28:56 crc kubenswrapper[5039]: I1124 13:28:56.127735 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d" podStartSLOduration=24.407862163 podStartE2EDuration="32.127718223s" podCreationTimestamp="2025-11-24 13:28:24 +0000 UTC" firstStartedPulling="2025-11-24 13:28:47.742995652 +0000 UTC m=+640.182120152" lastFinishedPulling="2025-11-24 13:28:55.462851712 +0000 UTC m=+647.901976212" observedRunningTime="2025-11-24 13:28:56.126243411 +0000 UTC m=+648.565367931" watchObservedRunningTime="2025-11-24 13:28:56.127718223 +0000 UTC m=+648.566842723"
Nov 24 13:28:56 crc kubenswrapper[5039]: I1124 13:28:56.176320 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-6t44d"
Nov 24 13:28:56 crc kubenswrapper[5039]: E1124 13:28:56.390701 5039 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf60bd1ab_ddc1_462f_85f9_e47d7305727d.slice/crio-48f3d375d73737b4f7b7da5d663cb06af34d5569a82a0b855a97dc97476e9731.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf60bd1ab_ddc1_462f_85f9_e47d7305727d.slice/crio-conmon-48f3d375d73737b4f7b7da5d663cb06af34d5569a82a0b855a97dc97476e9731.scope\": RecentStats: unable to find data in memory cache]"
Nov 24 13:29:00 crc kubenswrapper[5039]: I1124 13:29:00.138590 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-kkzn2" event={"ID":"f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b","Type":"ContainerStarted","Data":"0353e94542a46af5e2cf82f06570ca88cb4f47096f8890b8fd57237c8a3110c7"}
Nov 24 13:29:00 crc kubenswrapper[5039]: I1124 13:29:00.139915 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-kkzn2"
Nov 24 13:29:00 crc kubenswrapper[5039]: I1124 13:29:00.141821 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" event={"ID":"79c1d6a0-9ed7-48c8-8a09-e4695a89d953","Type":"ContainerStarted","Data":"33038f73bc5ec7b7ce51c8303038322d8cdc7a37e4ae0ad02bfabe03f6e16191"}
Nov 24 13:29:00 crc kubenswrapper[5039]: I1124 13:29:00.143746 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" event={"ID":"eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e","Type":"ContainerStarted","Data":"721aa782ae4b30e7cf501cb0674ad8a83301841207ab7aff48612428068e726e"}
Nov 24 13:29:00 crc kubenswrapper[5039]: I1124 13:29:00.148652 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" event={"ID":"21190c12-076c-4263-a68d-6dc4117e1d10","Type":"ContainerStarted","Data":"201757cabb9e90b7c2787f09d2da0c098b8e7f1e37cfcc213d6078c33da34de9"}
Nov 24 13:29:00 crc kubenswrapper[5039]: I1124 13:29:00.160288 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-kkzn2" podStartSLOduration=31.695952453 podStartE2EDuration="35.160267785s" podCreationTimestamp="2025-11-24 13:28:25 +0000 UTC" firstStartedPulling="2025-11-24 13:28:55.703536725 +0000 UTC m=+648.142661225" lastFinishedPulling="2025-11-24 13:28:59.167852057 +0000 UTC m=+651.606976557" observedRunningTime="2025-11-24 13:29:00.156514557 +0000 UTC m=+652.595639077" watchObservedRunningTime="2025-11-24 13:29:00.160267785 +0000 UTC m=+652.599392285"
Nov 24 13:29:00 crc kubenswrapper[5039]: I1124 13:29:00.177036 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq" podStartSLOduration=32.715343259 podStartE2EDuration="36.177019155s" podCreationTimestamp="2025-11-24 13:28:24 +0000 UTC" firstStartedPulling="2025-11-24 13:28:55.698038767 +0000 UTC m=+648.137163267" lastFinishedPulling="2025-11-24 13:28:59.159714653 +0000 UTC m=+651.598839163" observedRunningTime="2025-11-24 13:29:00.176097674 +0000 UTC m=+652.615222174" watchObservedRunningTime="2025-11-24 13:29:00.177019155 +0000 UTC m=+652.616143655"
Nov 24 13:29:00 crc kubenswrapper[5039]: I1124 13:29:00.196676 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w" podStartSLOduration=32.74123136 podStartE2EDuration="36.196657564s" podCreationTimestamp="2025-11-24 13:28:24 +0000 UTC" firstStartedPulling="2025-11-24 13:28:55.70518721 +0000 UTC m=+648.144311710" lastFinishedPulling="2025-11-24 13:28:59.160613414 +0000 UTC m=+651.599737914" observedRunningTime="2025-11-24 13:29:00.196197253 +0000 UTC m=+652.635321753" watchObservedRunningTime="2025-11-24 13:29:00.196657564 +0000 UTC m=+652.635782064"
Nov 24 13:29:00 crc kubenswrapper[5039]: I1124 13:29:00.221125 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-dnvjn" podStartSLOduration=32.642036012 podStartE2EDuration="36.221106958s" podCreationTimestamp="2025-11-24 13:28:24 +0000 UTC" firstStartedPulling="2025-11-24 13:28:55.585949253 +0000 UTC m=+648.025073753" lastFinishedPulling="2025-11-24 13:28:59.165020199 +0000 UTC m=+651.604144699" observedRunningTime="2025-11-24 13:29:00.219612283 +0000 UTC m=+652.658736823" watchObservedRunningTime="2025-11-24 13:29:00.221106958 +0000 UTC m=+652.660231458"
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.426780 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-kkzn2"
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.674396 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-dwbfg"]
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.675384 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-dwbfg"
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.677534 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.677584 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.677857 5039 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-cf9ns"
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.679958 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-dwbfg"]
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.717319 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-9rmz6"]
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.718941 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-9rmz6"
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.722627 5039 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-7wbks"
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.723623 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-8kpc5"]
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.724580 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-8kpc5"
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.726102 5039 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-tclwh"
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.731165 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-9rmz6"]
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.736853 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-8kpc5"]
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.837193 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4wj5\" (UniqueName: \"kubernetes.io/projected/2526d128-0579-4f6f-9327-12ac7fe30e96-kube-api-access-z4wj5\") pod \"cert-manager-webhook-5655c58dd6-9rmz6\" (UID: \"2526d128-0579-4f6f-9327-12ac7fe30e96\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-9rmz6"
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.837451 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2zvf\" (UniqueName: \"kubernetes.io/projected/ab33654d-a27e-4922-87c3-37d387a8dfa6-kube-api-access-d2zvf\") pod \"cert-manager-5b446d88c5-8kpc5\" (UID: \"ab33654d-a27e-4922-87c3-37d387a8dfa6\") " pod="cert-manager/cert-manager-5b446d88c5-8kpc5"
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.837516 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4f6jw\" (UniqueName: \"kubernetes.io/projected/e8bfdf0d-df1c-4dda-8c3d-8113eee0ad4a-kube-api-access-4f6jw\") pod \"cert-manager-cainjector-7f985d654d-dwbfg\" (UID: \"e8bfdf0d-df1c-4dda-8c3d-8113eee0ad4a\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-dwbfg"
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.938929 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4f6jw\" (UniqueName: \"kubernetes.io/projected/e8bfdf0d-df1c-4dda-8c3d-8113eee0ad4a-kube-api-access-4f6jw\") pod \"cert-manager-cainjector-7f985d654d-dwbfg\" (UID: \"e8bfdf0d-df1c-4dda-8c3d-8113eee0ad4a\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-dwbfg"
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.939017 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4wj5\" (UniqueName: \"kubernetes.io/projected/2526d128-0579-4f6f-9327-12ac7fe30e96-kube-api-access-z4wj5\") pod \"cert-manager-webhook-5655c58dd6-9rmz6\" (UID: \"2526d128-0579-4f6f-9327-12ac7fe30e96\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-9rmz6"
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.939043 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2zvf\" (UniqueName: \"kubernetes.io/projected/ab33654d-a27e-4922-87c3-37d387a8dfa6-kube-api-access-d2zvf\") pod \"cert-manager-5b446d88c5-8kpc5\" (UID: \"ab33654d-a27e-4922-87c3-37d387a8dfa6\") " pod="cert-manager/cert-manager-5b446d88c5-8kpc5"
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.956652 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2zvf\" (UniqueName: \"kubernetes.io/projected/ab33654d-a27e-4922-87c3-37d387a8dfa6-kube-api-access-d2zvf\") pod \"cert-manager-5b446d88c5-8kpc5\" (UID: \"ab33654d-a27e-4922-87c3-37d387a8dfa6\") " pod="cert-manager/cert-manager-5b446d88c5-8kpc5"
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.958581 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4wj5\" (UniqueName: \"kubernetes.io/projected/2526d128-0579-4f6f-9327-12ac7fe30e96-kube-api-access-z4wj5\") pod \"cert-manager-webhook-5655c58dd6-9rmz6\" (UID: \"2526d128-0579-4f6f-9327-12ac7fe30e96\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-9rmz6"
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.963371 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4f6jw\" (UniqueName: \"kubernetes.io/projected/e8bfdf0d-df1c-4dda-8c3d-8113eee0ad4a-kube-api-access-4f6jw\") pod \"cert-manager-cainjector-7f985d654d-dwbfg\" (UID: \"e8bfdf0d-df1c-4dda-8c3d-8113eee0ad4a\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-dwbfg"
Nov 24 13:29:05 crc kubenswrapper[5039]: I1124 13:29:05.990185 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-dwbfg"
Nov 24 13:29:06 crc kubenswrapper[5039]: I1124 13:29:06.038727 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-9rmz6"
Nov 24 13:29:06 crc kubenswrapper[5039]: I1124 13:29:06.048389 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-8kpc5"
Nov 24 13:29:06 crc kubenswrapper[5039]: I1124 13:29:06.256172 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-dwbfg"]
Nov 24 13:29:06 crc kubenswrapper[5039]: I1124 13:29:06.295654 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-9rmz6"]
Nov 24 13:29:06 crc kubenswrapper[5039]: E1124 13:29:06.512836 5039 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf60bd1ab_ddc1_462f_85f9_e47d7305727d.slice/crio-conmon-48f3d375d73737b4f7b7da5d663cb06af34d5569a82a0b855a97dc97476e9731.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf60bd1ab_ddc1_462f_85f9_e47d7305727d.slice/crio-48f3d375d73737b4f7b7da5d663cb06af34d5569a82a0b855a97dc97476e9731.scope\": RecentStats: unable to find data in memory cache]"
Nov 24 13:29:06 crc kubenswrapper[5039]: I1124 13:29:06.539012 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-8kpc5"]
Nov 24 13:29:06 crc kubenswrapper[5039]: W1124 13:29:06.542710 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podab33654d_a27e_4922_87c3_37d387a8dfa6.slice/crio-d4f722c5344c278c4cfbc53b56fa532ea60c346b1097b88e09c742b51f5cecd1 WatchSource:0}: Error finding container d4f722c5344c278c4cfbc53b56fa532ea60c346b1097b88e09c742b51f5cecd1: Status 404 returned error can't find the container with id d4f722c5344c278c4cfbc53b56fa532ea60c346b1097b88e09c742b51f5cecd1
Nov 24 13:29:07 crc kubenswrapper[5039]: I1124 13:29:07.191707 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-dwbfg" event={"ID":"e8bfdf0d-df1c-4dda-8c3d-8113eee0ad4a","Type":"ContainerStarted","Data":"23ed36ac44423f2bc70e2a0792d1045cc3fdd5a2ed2f56ad962a8b1e0cdb46f2"}
Nov 24 13:29:07 crc kubenswrapper[5039]: I1124 13:29:07.192624 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-8kpc5" event={"ID":"ab33654d-a27e-4922-87c3-37d387a8dfa6","Type":"ContainerStarted","Data":"d4f722c5344c278c4cfbc53b56fa532ea60c346b1097b88e09c742b51f5cecd1"}
Nov 24 13:29:07 crc kubenswrapper[5039]: I1124 13:29:07.193554 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-9rmz6" event={"ID":"2526d128-0579-4f6f-9327-12ac7fe30e96","Type":"ContainerStarted","Data":"3f187d2b0bb10f09069762e98856b0625651dcfe6b623f1bcdf844ac6edc5036"}
Nov 24 13:29:08 crc kubenswrapper[5039]: I1124 13:29:08.539383 5039 scope.go:117] "RemoveContainer" containerID="8f68c347316af28eef4d9d661fff4ef8497e81704ecbdb6794e54ba842a37e20"
Nov 24 13:29:09 crc kubenswrapper[5039]: I1124 13:29:09.205479 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kr94g_6c18c830-d513-4df0-be92-cd44f2d2c5df/kube-multus/2.log"
Nov 24 13:29:12 crc kubenswrapper[5039]: I1124 13:29:12.246848 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-9rmz6" event={"ID":"2526d128-0579-4f6f-9327-12ac7fe30e96","Type":"ContainerStarted","Data":"8dd316bbd6148d48e80b10216a302292f4ecbb1d79af33767aca163b54201125"}
Nov 24 13:29:12 crc kubenswrapper[5039]: I1124 13:29:12.247382 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-9rmz6"
Nov 24 13:29:12 crc kubenswrapper[5039]: I1124 13:29:12.263361 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-9rmz6" podStartSLOduration=2.342969848 podStartE2EDuration="7.263322927s" podCreationTimestamp="2025-11-24 13:29:05 +0000 UTC" firstStartedPulling="2025-11-24 13:29:06.299021779 +0000 UTC m=+658.738146279" lastFinishedPulling="2025-11-24 13:29:11.219374858 +0000 UTC m=+663.658499358" observedRunningTime="2025-11-24 13:29:12.260439508 +0000 UTC m=+664.699564008" watchObservedRunningTime="2025-11-24 13:29:12.263322927 +0000 UTC m=+664.702447427"
Nov 24 13:29:13 crc kubenswrapper[5039]: I1124 13:29:13.272247 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-dwbfg" event={"ID":"e8bfdf0d-df1c-4dda-8c3d-8113eee0ad4a","Type":"ContainerStarted","Data":"8f37a4396241f1e43b1d163a127ef281c8a4ef1096f8df6948dbe36d2187699a"}
Nov 24 13:29:13 crc kubenswrapper[5039]: I1124 13:29:13.274628 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-8kpc5" event={"ID":"ab33654d-a27e-4922-87c3-37d387a8dfa6","Type":"ContainerStarted","Data":"088c6c7e19d4593baadf8963e02e604eaf3a8e8f3a50fbed133044265d5abf6d"}
Nov 24 13:29:13 crc kubenswrapper[5039]: I1124 13:29:13.292366 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-dwbfg" podStartSLOduration=2.081302133 podStartE2EDuration="8.292349421s" podCreationTimestamp="2025-11-24 13:29:05 +0000 UTC" firstStartedPulling="2025-11-24 13:29:06.267007965 +0000 UTC m=+658.706132455" lastFinishedPulling="2025-11-24 13:29:12.478055243 +0000 UTC m=+664.917179743" observedRunningTime="2025-11-24 13:29:13.290792354 +0000 UTC m=+665.729916864" watchObservedRunningTime="2025-11-24 13:29:13.292349421 +0000 UTC m=+665.731473931"
Nov 24 13:29:13 crc kubenswrapper[5039]: I1124 13:29:13.312855 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-8kpc5" podStartSLOduration=2.31240902 podStartE2EDuration="8.31283413s" podCreationTimestamp="2025-11-24 13:29:05 +0000 UTC" firstStartedPulling="2025-11-24 13:29:06.545558044 +0000 UTC m=+658.984682544" lastFinishedPulling="2025-11-24 13:29:12.545983154 +0000 UTC m=+664.985107654" observedRunningTime="2025-11-24 13:29:13.310831293 +0000 UTC m=+665.749955803" watchObservedRunningTime="2025-11-24 13:29:13.31283413 +0000 UTC m=+665.751958650"
Nov 24 13:29:16 crc kubenswrapper[5039]: I1124 13:29:16.040899 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-9rmz6"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.065667 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6"]
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.067014 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.069880 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.088488 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6"]
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.111690 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8153476f-9f52-4a9b-9976-f71664f6f667-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6\" (UID: \"8153476f-9f52-4a9b-9976-f71664f6f667\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.112018 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6d2c\" (UniqueName: \"kubernetes.io/projected/8153476f-9f52-4a9b-9976-f71664f6f667-kube-api-access-l6d2c\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6\" (UID: \"8153476f-9f52-4a9b-9976-f71664f6f667\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.112141 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8153476f-9f52-4a9b-9976-f71664f6f667-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6\" (UID: \"8153476f-9f52-4a9b-9976-f71664f6f667\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.213455 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8153476f-9f52-4a9b-9976-f71664f6f667-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6\" (UID: \"8153476f-9f52-4a9b-9976-f71664f6f667\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.213512 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6d2c\" (UniqueName: \"kubernetes.io/projected/8153476f-9f52-4a9b-9976-f71664f6f667-kube-api-access-l6d2c\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6\" (UID: \"8153476f-9f52-4a9b-9976-f71664f6f667\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.213545 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8153476f-9f52-4a9b-9976-f71664f6f667-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6\" (UID: \"8153476f-9f52-4a9b-9976-f71664f6f667\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.214035 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8153476f-9f52-4a9b-9976-f71664f6f667-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6\" (UID: \"8153476f-9f52-4a9b-9976-f71664f6f667\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.214116 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8153476f-9f52-4a9b-9976-f71664f6f667-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6\" (UID: \"8153476f-9f52-4a9b-9976-f71664f6f667\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.241369 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6d2c\" (UniqueName: \"kubernetes.io/projected/8153476f-9f52-4a9b-9976-f71664f6f667-kube-api-access-l6d2c\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6\" (UID: \"8153476f-9f52-4a9b-9976-f71664f6f667\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.247430 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz"]
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.248393 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.257427 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz"]
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.314793 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d615d58f-8a19-4226-a022-26c3c2f46eaa-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz\" (UID: \"d615d58f-8a19-4226-a022-26c3c2f46eaa\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.314898 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvfdh\" (UniqueName: \"kubernetes.io/projected/d615d58f-8a19-4226-a022-26c3c2f46eaa-kube-api-access-nvfdh\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz\" (UID: \"d615d58f-8a19-4226-a022-26c3c2f46eaa\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.314961 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d615d58f-8a19-4226-a022-26c3c2f46eaa-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz\" (UID: \"d615d58f-8a19-4226-a022-26c3c2f46eaa\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.379879 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.415785 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d615d58f-8a19-4226-a022-26c3c2f46eaa-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz\" (UID: \"d615d58f-8a19-4226-a022-26c3c2f46eaa\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.415941 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvfdh\" (UniqueName: \"kubernetes.io/projected/d615d58f-8a19-4226-a022-26c3c2f46eaa-kube-api-access-nvfdh\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz\" (UID: \"d615d58f-8a19-4226-a022-26c3c2f46eaa\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.415996 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d615d58f-8a19-4226-a022-26c3c2f46eaa-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz\" (UID: \"d615d58f-8a19-4226-a022-26c3c2f46eaa\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.416616 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d615d58f-8a19-4226-a022-26c3c2f46eaa-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz\" (UID: \"d615d58f-8a19-4226-a022-26c3c2f46eaa\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.416881 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d615d58f-8a19-4226-a022-26c3c2f46eaa-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz\" (UID: \"d615d58f-8a19-4226-a022-26c3c2f46eaa\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.437142 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvfdh\" (UniqueName: \"kubernetes.io/projected/d615d58f-8a19-4226-a022-26c3c2f46eaa-kube-api-access-nvfdh\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz\" (UID: \"d615d58f-8a19-4226-a022-26c3c2f46eaa\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.564909 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz"
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.571669 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6"]
Nov 24 13:29:42 crc kubenswrapper[5039]: I1124 13:29:42.805025 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz"]
Nov 24 13:29:42 crc kubenswrapper[5039]: W1124 13:29:42.816426 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd615d58f_8a19_4226_a022_26c3c2f46eaa.slice/crio-ca9091a3e10e6cc74012ff1551ac287073a21a88694977612dc501fbb866adb8 WatchSource:0}: Error finding container ca9091a3e10e6cc74012ff1551ac287073a21a88694977612dc501fbb866adb8: Status 404 returned error can't find the container with id ca9091a3e10e6cc74012ff1551ac287073a21a88694977612dc501fbb866adb8
Nov 24 13:29:43 crc kubenswrapper[5039]: I1124 13:29:43.473287 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz" event={"ID":"d615d58f-8a19-4226-a022-26c3c2f46eaa","Type":"ContainerStarted","Data":"44aba6823a7dc6e0ac21303d810ebe6836b02d867af64d4ba54e0d79a99ed252"}
Nov 24 13:29:43 crc kubenswrapper[5039]: I1124 13:29:43.473355 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz" event={"ID":"d615d58f-8a19-4226-a022-26c3c2f46eaa","Type":"ContainerStarted","Data":"ca9091a3e10e6cc74012ff1551ac287073a21a88694977612dc501fbb866adb8"}
Nov 24 13:29:43 crc kubenswrapper[5039]: I1124 13:29:43.474843 5039 generic.go:334] "Generic (PLEG): container finished" podID="8153476f-9f52-4a9b-9976-f71664f6f667" containerID="d25225c0612a5e3f6640e938879bcfeb17a3af8755c5c7db5ccdbcaf85a993e0" exitCode=0
Nov 24 13:29:43 crc kubenswrapper[5039]: I1124 13:29:43.474887 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6" event={"ID":"8153476f-9f52-4a9b-9976-f71664f6f667","Type":"ContainerDied","Data":"d25225c0612a5e3f6640e938879bcfeb17a3af8755c5c7db5ccdbcaf85a993e0"}
Nov 24 13:29:43 crc kubenswrapper[5039]: I1124 13:29:43.474901 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6" event={"ID":"8153476f-9f52-4a9b-9976-f71664f6f667","Type":"ContainerStarted","Data":"7f7f31cbf3246c35c4285fdf3c236371de195c4f0b674e8a11b15633152b5123"}
Nov 24 13:29:44 crc kubenswrapper[5039]: I1124 13:29:44.484162 5039 generic.go:334] "Generic (PLEG): container finished" podID="d615d58f-8a19-4226-a022-26c3c2f46eaa" containerID="44aba6823a7dc6e0ac21303d810ebe6836b02d867af64d4ba54e0d79a99ed252" exitCode=0
Nov 24 13:29:44 crc kubenswrapper[5039]: I1124 13:29:44.484216 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz" event={"ID":"d615d58f-8a19-4226-a022-26c3c2f46eaa","Type":"ContainerDied","Data":"44aba6823a7dc6e0ac21303d810ebe6836b02d867af64d4ba54e0d79a99ed252"}
Nov 24 13:29:45 crc kubenswrapper[5039]: I1124 13:29:45.492391 5039 generic.go:334] "Generic (PLEG): container finished" podID="8153476f-9f52-4a9b-9976-f71664f6f667" containerID="84881bd9cbb21f609cf9f410498f76d34256c9bedbbb2cc90f59b49d40fef048" exitCode=0
Nov 24 13:29:45 crc kubenswrapper[5039]: I1124 13:29:45.492426 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6" event={"ID":"8153476f-9f52-4a9b-9976-f71664f6f667","Type":"ContainerDied","Data":"84881bd9cbb21f609cf9f410498f76d34256c9bedbbb2cc90f59b49d40fef048"}
Nov 24 13:29:46 crc kubenswrapper[5039]: I1124 13:29:46.498984 5039 generic.go:334] "Generic (PLEG): container finished" podID="8153476f-9f52-4a9b-9976-f71664f6f667" containerID="f8cfe5e40087ba3cf6b8b43e205426e1b9d61760a2f8aad84072e6f1dd333271" exitCode=0
Nov 24 13:29:46 crc kubenswrapper[5039]: I1124 13:29:46.499081 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6" event={"ID":"8153476f-9f52-4a9b-9976-f71664f6f667","Type":"ContainerDied","Data":"f8cfe5e40087ba3cf6b8b43e205426e1b9d61760a2f8aad84072e6f1dd333271"}
Nov 24 13:29:47 crc kubenswrapper[5039]: I1124 13:29:47.506371 5039 generic.go:334] "Generic (PLEG): container finished" podID="d615d58f-8a19-4226-a022-26c3c2f46eaa" containerID="cccab93fbcd03442cbad196cff89484ed0d9087dfb65d11176e2a6b4728c619e" exitCode=0
Nov 24 13:29:47 crc kubenswrapper[5039]: I1124 13:29:47.506757 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz" event={"ID":"d615d58f-8a19-4226-a022-26c3c2f46eaa","Type":"ContainerDied","Data":"cccab93fbcd03442cbad196cff89484ed0d9087dfb65d11176e2a6b4728c619e"}
Nov 24 13:29:47 crc kubenswrapper[5039]: I1124 13:29:47.713957 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6"
Nov 24 13:29:47 crc kubenswrapper[5039]: I1124 13:29:47.788065 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8153476f-9f52-4a9b-9976-f71664f6f667-util\") pod \"8153476f-9f52-4a9b-9976-f71664f6f667\" (UID: \"8153476f-9f52-4a9b-9976-f71664f6f667\") "
Nov 24 13:29:47 crc kubenswrapper[5039]: I1124 13:29:47.788166 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8153476f-9f52-4a9b-9976-f71664f6f667-bundle\") pod \"8153476f-9f52-4a9b-9976-f71664f6f667\" (UID: \"8153476f-9f52-4a9b-9976-f71664f6f667\") "
Nov 24 13:29:47 crc kubenswrapper[5039]: I1124 13:29:47.788193 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l6d2c\" (UniqueName: \"kubernetes.io/projected/8153476f-9f52-4a9b-9976-f71664f6f667-kube-api-access-l6d2c\") pod \"8153476f-9f52-4a9b-9976-f71664f6f667\" (UID: \"8153476f-9f52-4a9b-9976-f71664f6f667\") "
Nov 24 13:29:47 crc kubenswrapper[5039]: I1124 13:29:47.789913 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8153476f-9f52-4a9b-9976-f71664f6f667-bundle" (OuterVolumeSpecName: "bundle") pod "8153476f-9f52-4a9b-9976-f71664f6f667" (UID: "8153476f-9f52-4a9b-9976-f71664f6f667"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:29:47 crc kubenswrapper[5039]: I1124 13:29:47.793304 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8153476f-9f52-4a9b-9976-f71664f6f667-kube-api-access-l6d2c" (OuterVolumeSpecName: "kube-api-access-l6d2c") pod "8153476f-9f52-4a9b-9976-f71664f6f667" (UID: "8153476f-9f52-4a9b-9976-f71664f6f667"). InnerVolumeSpecName "kube-api-access-l6d2c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:29:47 crc kubenswrapper[5039]: I1124 13:29:47.804426 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8153476f-9f52-4a9b-9976-f71664f6f667-util" (OuterVolumeSpecName: "util") pod "8153476f-9f52-4a9b-9976-f71664f6f667" (UID: "8153476f-9f52-4a9b-9976-f71664f6f667"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:29:47 crc kubenswrapper[5039]: I1124 13:29:47.889823 5039 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8153476f-9f52-4a9b-9976-f71664f6f667-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 13:29:47 crc kubenswrapper[5039]: I1124 13:29:47.889865 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l6d2c\" (UniqueName: \"kubernetes.io/projected/8153476f-9f52-4a9b-9976-f71664f6f667-kube-api-access-l6d2c\") on node \"crc\" DevicePath \"\""
Nov 24 13:29:47 crc kubenswrapper[5039]: I1124 13:29:47.889875 5039 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8153476f-9f52-4a9b-9976-f71664f6f667-util\") on node \"crc\" DevicePath \"\""
Nov 24 13:29:48 crc kubenswrapper[5039]: I1124 13:29:48.519387 5039 generic.go:334] "Generic (PLEG): container finished" podID="d615d58f-8a19-4226-a022-26c3c2f46eaa" containerID="d11f9679ef4bc09327b21938c4b4ea250f4b7a43d0a381c241f69c8e6bf18a9c" exitCode=0
Nov 24 13:29:48 crc kubenswrapper[5039]: I1124 13:29:48.519442 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz" event={"ID":"d615d58f-8a19-4226-a022-26c3c2f46eaa","Type":"ContainerDied","Data":"d11f9679ef4bc09327b21938c4b4ea250f4b7a43d0a381c241f69c8e6bf18a9c"}
Nov 24 13:29:48 crc kubenswrapper[5039]: I1124 13:29:48.522612 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6" event={"ID":"8153476f-9f52-4a9b-9976-f71664f6f667","Type":"ContainerDied","Data":"7f7f31cbf3246c35c4285fdf3c236371de195c4f0b674e8a11b15633152b5123"}
Nov 24 13:29:48 crc kubenswrapper[5039]: I1124 13:29:48.522651 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7f7f31cbf3246c35c4285fdf3c236371de195c4f0b674e8a11b15633152b5123"
Nov 24 13:29:48 crc kubenswrapper[5039]: I1124 13:29:48.522711 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6"
Nov 24 13:29:49 crc kubenswrapper[5039]: I1124 13:29:49.741655 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz"
Nov 24 13:29:49 crc kubenswrapper[5039]: I1124 13:29:49.812339 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nvfdh\" (UniqueName: \"kubernetes.io/projected/d615d58f-8a19-4226-a022-26c3c2f46eaa-kube-api-access-nvfdh\") pod \"d615d58f-8a19-4226-a022-26c3c2f46eaa\" (UID: \"d615d58f-8a19-4226-a022-26c3c2f46eaa\") "
Nov 24 13:29:49 crc kubenswrapper[5039]: I1124 13:29:49.812526 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d615d58f-8a19-4226-a022-26c3c2f46eaa-bundle\") pod \"d615d58f-8a19-4226-a022-26c3c2f46eaa\" (UID: \"d615d58f-8a19-4226-a022-26c3c2f46eaa\") "
Nov 24 13:29:49 crc kubenswrapper[5039]: I1124 13:29:49.812600 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d615d58f-8a19-4226-a022-26c3c2f46eaa-util\") pod \"d615d58f-8a19-4226-a022-26c3c2f46eaa\" (UID: \"d615d58f-8a19-4226-a022-26c3c2f46eaa\") "
Nov 24 13:29:49 crc kubenswrapper[5039]: I1124 13:29:49.813851 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d615d58f-8a19-4226-a022-26c3c2f46eaa-bundle" (OuterVolumeSpecName: "bundle") pod "d615d58f-8a19-4226-a022-26c3c2f46eaa" (UID: "d615d58f-8a19-4226-a022-26c3c2f46eaa"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:29:49 crc kubenswrapper[5039]: I1124 13:29:49.816706 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d615d58f-8a19-4226-a022-26c3c2f46eaa-kube-api-access-nvfdh" (OuterVolumeSpecName: "kube-api-access-nvfdh") pod "d615d58f-8a19-4226-a022-26c3c2f46eaa" (UID: "d615d58f-8a19-4226-a022-26c3c2f46eaa"). InnerVolumeSpecName "kube-api-access-nvfdh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:29:49 crc kubenswrapper[5039]: I1124 13:29:49.822526 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d615d58f-8a19-4226-a022-26c3c2f46eaa-util" (OuterVolumeSpecName: "util") pod "d615d58f-8a19-4226-a022-26c3c2f46eaa" (UID: "d615d58f-8a19-4226-a022-26c3c2f46eaa"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:29:49 crc kubenswrapper[5039]: I1124 13:29:49.914525 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nvfdh\" (UniqueName: \"kubernetes.io/projected/d615d58f-8a19-4226-a022-26c3c2f46eaa-kube-api-access-nvfdh\") on node \"crc\" DevicePath \"\""
Nov 24 13:29:49 crc kubenswrapper[5039]: I1124 13:29:49.914559 5039 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d615d58f-8a19-4226-a022-26c3c2f46eaa-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 13:29:49 crc kubenswrapper[5039]: I1124 13:29:49.914568 5039 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d615d58f-8a19-4226-a022-26c3c2f46eaa-util\") on node \"crc\" DevicePath \"\""
Nov 24 13:29:50 crc kubenswrapper[5039]: I1124 13:29:50.101478 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 13:29:50 crc kubenswrapper[5039]: I1124 13:29:50.101548 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 13:29:50 crc kubenswrapper[5039]: I1124 13:29:50.536309 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz" event={"ID":"d615d58f-8a19-4226-a022-26c3c2f46eaa","Type":"ContainerDied","Data":"ca9091a3e10e6cc74012ff1551ac287073a21a88694977612dc501fbb866adb8"}
Nov 24 13:29:50 crc kubenswrapper[5039]: I1124 13:29:50.536750 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca9091a3e10e6cc74012ff1551ac287073a21a88694977612dc501fbb866adb8"
Nov 24 13:29:50 crc kubenswrapper[5039]: I1124 13:29:50.536411 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.076735 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb"]
Nov 24 13:30:00 crc kubenswrapper[5039]: E1124 13:30:00.077538 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8153476f-9f52-4a9b-9976-f71664f6f667" containerName="pull"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.077554 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8153476f-9f52-4a9b-9976-f71664f6f667" containerName="pull"
Nov 24 13:30:00 crc kubenswrapper[5039]: E1124 13:30:00.077572 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d615d58f-8a19-4226-a022-26c3c2f46eaa" containerName="extract"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.077580 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="d615d58f-8a19-4226-a022-26c3c2f46eaa" containerName="extract"
Nov 24 13:30:00 crc kubenswrapper[5039]: E1124 13:30:00.077591 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d615d58f-8a19-4226-a022-26c3c2f46eaa" containerName="pull"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.077599 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="d615d58f-8a19-4226-a022-26c3c2f46eaa" containerName="pull"
Nov 24 13:30:00 crc kubenswrapper[5039]: E1124 13:30:00.077616 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8153476f-9f52-4a9b-9976-f71664f6f667" containerName="extract"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.077623 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8153476f-9f52-4a9b-9976-f71664f6f667" containerName="extract"
Nov 24 13:30:00 crc kubenswrapper[5039]: E1124 13:30:00.077635 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d615d58f-8a19-4226-a022-26c3c2f46eaa" containerName="util"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.077644 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="d615d58f-8a19-4226-a022-26c3c2f46eaa" containerName="util"
Nov 24 13:30:00 crc kubenswrapper[5039]: E1124 13:30:00.077655 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8153476f-9f52-4a9b-9976-f71664f6f667" containerName="util"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.077662 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8153476f-9f52-4a9b-9976-f71664f6f667" containerName="util"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.077772 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="8153476f-9f52-4a9b-9976-f71664f6f667" containerName="extract"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.077793 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="d615d58f-8a19-4226-a022-26c3c2f46eaa" containerName="extract"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.078530 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.081264 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"openshift-service-ca.crt"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.081548 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"kube-root-ca.crt"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.081606 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-service-cert"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.081612 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"loki-operator-manager-config"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.081833 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-dockercfg-x2fw9"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.081984 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-metrics"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.098652 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb"]
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.138585 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e97f0fac-4f42-4ea9-b853-33c7aedeba68-apiservice-cert\") pod \"loki-operator-controller-manager-dfbf69d45-vngzb\" (UID: \"e97f0fac-4f42-4ea9-b853-33c7aedeba68\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.138675 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e97f0fac-4f42-4ea9-b853-33c7aedeba68-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-dfbf69d45-vngzb\" (UID: \"e97f0fac-4f42-4ea9-b853-33c7aedeba68\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.138710 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpw62\" (UniqueName: \"kubernetes.io/projected/e97f0fac-4f42-4ea9-b853-33c7aedeba68-kube-api-access-wpw62\") pod \"loki-operator-controller-manager-dfbf69d45-vngzb\" (UID: \"e97f0fac-4f42-4ea9-b853-33c7aedeba68\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.138733 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e97f0fac-4f42-4ea9-b853-33c7aedeba68-webhook-cert\") pod \"loki-operator-controller-manager-dfbf69d45-vngzb\" (UID: \"e97f0fac-4f42-4ea9-b853-33c7aedeba68\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.138770 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/e97f0fac-4f42-4ea9-b853-33c7aedeba68-manager-config\") pod \"loki-operator-controller-manager-dfbf69d45-vngzb\" (UID: \"e97f0fac-4f42-4ea9-b853-33c7aedeba68\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.172879 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4"]
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.173572 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.174789 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.175350 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.182811 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4"]
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.240210 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f8b02714-17dd-49f0-8cbe-2c61d3123d77-config-volume\") pod \"collect-profiles-29399850-kwqf4\" (UID: \"f8b02714-17dd-49f0-8cbe-2c61d3123d77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.240259 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e97f0fac-4f42-4ea9-b853-33c7aedeba68-apiservice-cert\") pod \"loki-operator-controller-manager-dfbf69d45-vngzb\" (UID: \"e97f0fac-4f42-4ea9-b853-33c7aedeba68\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.240284 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f8b02714-17dd-49f0-8cbe-2c61d3123d77-secret-volume\") pod \"collect-profiles-29399850-kwqf4\" (UID: \"f8b02714-17dd-49f0-8cbe-2c61d3123d77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.240299 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6td5\" (UniqueName: \"kubernetes.io/projected/f8b02714-17dd-49f0-8cbe-2c61d3123d77-kube-api-access-f6td5\") pod \"collect-profiles-29399850-kwqf4\" (UID: \"f8b02714-17dd-49f0-8cbe-2c61d3123d77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.240375 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e97f0fac-4f42-4ea9-b853-33c7aedeba68-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-dfbf69d45-vngzb\" (UID: \"e97f0fac-4f42-4ea9-b853-33c7aedeba68\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.240400 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpw62\" (UniqueName: \"kubernetes.io/projected/e97f0fac-4f42-4ea9-b853-33c7aedeba68-kube-api-access-wpw62\") pod \"loki-operator-controller-manager-dfbf69d45-vngzb\" (UID: \"e97f0fac-4f42-4ea9-b853-33c7aedeba68\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.240422 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e97f0fac-4f42-4ea9-b853-33c7aedeba68-webhook-cert\") pod \"loki-operator-controller-manager-dfbf69d45-vngzb\" (UID: \"e97f0fac-4f42-4ea9-b853-33c7aedeba68\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.240450 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/e97f0fac-4f42-4ea9-b853-33c7aedeba68-manager-config\") pod \"loki-operator-controller-manager-dfbf69d45-vngzb\" (UID: \"e97f0fac-4f42-4ea9-b853-33c7aedeba68\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.241588 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/e97f0fac-4f42-4ea9-b853-33c7aedeba68-manager-config\") pod \"loki-operator-controller-manager-dfbf69d45-vngzb\" (UID: \"e97f0fac-4f42-4ea9-b853-33c7aedeba68\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.246824 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e97f0fac-4f42-4ea9-b853-33c7aedeba68-webhook-cert\") pod \"loki-operator-controller-manager-dfbf69d45-vngzb\" (UID: \"e97f0fac-4f42-4ea9-b853-33c7aedeba68\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.249191 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e97f0fac-4f42-4ea9-b853-33c7aedeba68-apiservice-cert\") pod \"loki-operator-controller-manager-dfbf69d45-vngzb\" (UID: \"e97f0fac-4f42-4ea9-b853-33c7aedeba68\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.255775 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e97f0fac-4f42-4ea9-b853-33c7aedeba68-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-dfbf69d45-vngzb\" (UID: \"e97f0fac-4f42-4ea9-b853-33c7aedeba68\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.266122 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpw62\" (UniqueName: \"kubernetes.io/projected/e97f0fac-4f42-4ea9-b853-33c7aedeba68-kube-api-access-wpw62\") pod \"loki-operator-controller-manager-dfbf69d45-vngzb\" (UID: \"e97f0fac-4f42-4ea9-b853-33c7aedeba68\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.341565 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f8b02714-17dd-49f0-8cbe-2c61d3123d77-config-volume\") pod \"collect-profiles-29399850-kwqf4\" (UID: \"f8b02714-17dd-49f0-8cbe-2c61d3123d77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.341619 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f8b02714-17dd-49f0-8cbe-2c61d3123d77-secret-volume\") pod \"collect-profiles-29399850-kwqf4\" (UID: \"f8b02714-17dd-49f0-8cbe-2c61d3123d77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.341651 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6td5\" (UniqueName: \"kubernetes.io/projected/f8b02714-17dd-49f0-8cbe-2c61d3123d77-kube-api-access-f6td5\") pod \"collect-profiles-29399850-kwqf4\" (UID: \"f8b02714-17dd-49f0-8cbe-2c61d3123d77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.342598 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f8b02714-17dd-49f0-8cbe-2c61d3123d77-config-volume\") pod \"collect-profiles-29399850-kwqf4\" (UID: \"f8b02714-17dd-49f0-8cbe-2c61d3123d77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.347226 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f8b02714-17dd-49f0-8cbe-2c61d3123d77-secret-volume\") pod \"collect-profiles-29399850-kwqf4\" (UID: \"f8b02714-17dd-49f0-8cbe-2c61d3123d77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.363285 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6td5\" (UniqueName: \"kubernetes.io/projected/f8b02714-17dd-49f0-8cbe-2c61d3123d77-kube-api-access-f6td5\") pod \"collect-profiles-29399850-kwqf4\" (UID: \"f8b02714-17dd-49f0-8cbe-2c61d3123d77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.392987 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.486942 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4"
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.621091 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb"]
Nov 24 13:30:00 crc kubenswrapper[5039]: I1124 13:30:00.757755 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4"]
Nov 24 13:30:00 crc kubenswrapper[5039]: W1124 13:30:00.762207 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf8b02714_17dd_49f0_8cbe_2c61d3123d77.slice/crio-3d9aced4b4c43f0975554b262bbac2236344ee4542bb35223b9c177c6c505a01 WatchSource:0}: Error finding container 3d9aced4b4c43f0975554b262bbac2236344ee4542bb35223b9c177c6c505a01: Status 404 returned error can't find the container with id 3d9aced4b4c43f0975554b262bbac2236344ee4542bb35223b9c177c6c505a01
Nov 24 13:30:01 crc kubenswrapper[5039]: I1124 13:30:01.617579 5039 generic.go:334] "Generic (PLEG): container finished" podID="f8b02714-17dd-49f0-8cbe-2c61d3123d77" containerID="d5242c347ed24e17b516433160bff47edb8971f9b4473fdcc9da59c8e3dc229b" exitCode=0
Nov 24 13:30:01 crc kubenswrapper[5039]: I1124 13:30:01.617639 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4" event={"ID":"f8b02714-17dd-49f0-8cbe-2c61d3123d77","Type":"ContainerDied","Data":"d5242c347ed24e17b516433160bff47edb8971f9b4473fdcc9da59c8e3dc229b"}
Nov 24 13:30:01 crc kubenswrapper[5039]: I1124 13:30:01.617890 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4" event={"ID":"f8b02714-17dd-49f0-8cbe-2c61d3123d77","Type":"ContainerStarted","Data":"3d9aced4b4c43f0975554b262bbac2236344ee4542bb35223b9c177c6c505a01"}
Nov 24 13:30:01 crc kubenswrapper[5039]: I1124 13:30:01.619052 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb" event={"ID":"e97f0fac-4f42-4ea9-b853-33c7aedeba68","Type":"ContainerStarted","Data":"0141fba03c2aaa70ceba2412bcc4a78e7c1a619ec993c8dae473063fe76832e7"}
Nov 24 13:30:01 crc kubenswrapper[5039]: I1124 13:30:01.877952 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-8d4cv"]
Nov 24 13:30:01 crc kubenswrapper[5039]: I1124 13:30:01.878880 5039 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-logging/cluster-logging-operator-ff9846bd-8d4cv" Nov 24 13:30:01 crc kubenswrapper[5039]: I1124 13:30:01.880724 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"cluster-logging-operator-dockercfg-q4l9d" Nov 24 13:30:01 crc kubenswrapper[5039]: I1124 13:30:01.880924 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"kube-root-ca.crt" Nov 24 13:30:01 crc kubenswrapper[5039]: I1124 13:30:01.881396 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-8d4cv"] Nov 24 13:30:01 crc kubenswrapper[5039]: I1124 13:30:01.881614 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"openshift-service-ca.crt" Nov 24 13:30:01 crc kubenswrapper[5039]: I1124 13:30:01.960640 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kz5lf\" (UniqueName: \"kubernetes.io/projected/ecb03566-7ffa-42ab-aa02-22bad9858b86-kube-api-access-kz5lf\") pod \"cluster-logging-operator-ff9846bd-8d4cv\" (UID: \"ecb03566-7ffa-42ab-aa02-22bad9858b86\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-8d4cv" Nov 24 13:30:02 crc kubenswrapper[5039]: I1124 13:30:02.061833 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kz5lf\" (UniqueName: \"kubernetes.io/projected/ecb03566-7ffa-42ab-aa02-22bad9858b86-kube-api-access-kz5lf\") pod \"cluster-logging-operator-ff9846bd-8d4cv\" (UID: \"ecb03566-7ffa-42ab-aa02-22bad9858b86\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-8d4cv" Nov 24 13:30:02 crc kubenswrapper[5039]: I1124 13:30:02.095851 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kz5lf\" (UniqueName: \"kubernetes.io/projected/ecb03566-7ffa-42ab-aa02-22bad9858b86-kube-api-access-kz5lf\") pod \"cluster-logging-operator-ff9846bd-8d4cv\" (UID: \"ecb03566-7ffa-42ab-aa02-22bad9858b86\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-8d4cv" Nov 24 13:30:02 crc kubenswrapper[5039]: I1124 13:30:02.197727 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/cluster-logging-operator-ff9846bd-8d4cv" Nov 24 13:30:02 crc kubenswrapper[5039]: I1124 13:30:02.616889 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-8d4cv"] Nov 24 13:30:03 crc kubenswrapper[5039]: I1124 13:30:03.631723 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-ff9846bd-8d4cv" event={"ID":"ecb03566-7ffa-42ab-aa02-22bad9858b86","Type":"ContainerStarted","Data":"9bb31d2280de35c635003f925e26f6b9dbb2627056678e3436c1373e1bdb16cf"} Nov 24 13:30:04 crc kubenswrapper[5039]: I1124 13:30:04.330768 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4" Nov 24 13:30:04 crc kubenswrapper[5039]: I1124 13:30:04.394762 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f8b02714-17dd-49f0-8cbe-2c61d3123d77-config-volume\") pod \"f8b02714-17dd-49f0-8cbe-2c61d3123d77\" (UID: \"f8b02714-17dd-49f0-8cbe-2c61d3123d77\") " Nov 24 13:30:04 crc kubenswrapper[5039]: I1124 13:30:04.394831 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f8b02714-17dd-49f0-8cbe-2c61d3123d77-secret-volume\") pod \"f8b02714-17dd-49f0-8cbe-2c61d3123d77\" (UID: \"f8b02714-17dd-49f0-8cbe-2c61d3123d77\") " Nov 24 13:30:04 crc kubenswrapper[5039]: I1124 13:30:04.394963 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6td5\" (UniqueName: \"kubernetes.io/projected/f8b02714-17dd-49f0-8cbe-2c61d3123d77-kube-api-access-f6td5\") pod \"f8b02714-17dd-49f0-8cbe-2c61d3123d77\" (UID: \"f8b02714-17dd-49f0-8cbe-2c61d3123d77\") " Nov 24 13:30:04 crc kubenswrapper[5039]: I1124 13:30:04.397179 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8b02714-17dd-49f0-8cbe-2c61d3123d77-config-volume" (OuterVolumeSpecName: "config-volume") pod "f8b02714-17dd-49f0-8cbe-2c61d3123d77" (UID: "f8b02714-17dd-49f0-8cbe-2c61d3123d77"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:30:04 crc kubenswrapper[5039]: I1124 13:30:04.401735 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8b02714-17dd-49f0-8cbe-2c61d3123d77-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f8b02714-17dd-49f0-8cbe-2c61d3123d77" (UID: "f8b02714-17dd-49f0-8cbe-2c61d3123d77"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:30:04 crc kubenswrapper[5039]: I1124 13:30:04.403617 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8b02714-17dd-49f0-8cbe-2c61d3123d77-kube-api-access-f6td5" (OuterVolumeSpecName: "kube-api-access-f6td5") pod "f8b02714-17dd-49f0-8cbe-2c61d3123d77" (UID: "f8b02714-17dd-49f0-8cbe-2c61d3123d77"). InnerVolumeSpecName "kube-api-access-f6td5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:30:04 crc kubenswrapper[5039]: I1124 13:30:04.496404 5039 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f8b02714-17dd-49f0-8cbe-2c61d3123d77-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 13:30:04 crc kubenswrapper[5039]: I1124 13:30:04.496445 5039 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f8b02714-17dd-49f0-8cbe-2c61d3123d77-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 24 13:30:04 crc kubenswrapper[5039]: I1124 13:30:04.496457 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6td5\" (UniqueName: \"kubernetes.io/projected/f8b02714-17dd-49f0-8cbe-2c61d3123d77-kube-api-access-f6td5\") on node \"crc\" DevicePath \"\"" Nov 24 13:30:04 crc kubenswrapper[5039]: I1124 13:30:04.640943 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4" event={"ID":"f8b02714-17dd-49f0-8cbe-2c61d3123d77","Type":"ContainerDied","Data":"3d9aced4b4c43f0975554b262bbac2236344ee4542bb35223b9c177c6c505a01"} Nov 24 13:30:04 crc kubenswrapper[5039]: I1124 13:30:04.640992 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3d9aced4b4c43f0975554b262bbac2236344ee4542bb35223b9c177c6c505a01" Nov 24 13:30:04 crc kubenswrapper[5039]: I1124 13:30:04.641045 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4" Nov 24 13:30:05 crc kubenswrapper[5039]: I1124 13:30:05.647245 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb" event={"ID":"e97f0fac-4f42-4ea9-b853-33c7aedeba68","Type":"ContainerStarted","Data":"774985eb6229e912df6009a4cc802627aab28a6711073e30e25d1c41c684470b"} Nov 24 13:30:13 crc kubenswrapper[5039]: I1124 13:30:13.710286 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-ff9846bd-8d4cv" event={"ID":"ecb03566-7ffa-42ab-aa02-22bad9858b86","Type":"ContainerStarted","Data":"21f65a499576c1a339eeb152668bf412eb073a69a7e84ac3f076cce9714781af"} Nov 24 13:30:13 crc kubenswrapper[5039]: I1124 13:30:13.733056 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/cluster-logging-operator-ff9846bd-8d4cv" podStartSLOduration=1.952174898 podStartE2EDuration="12.73303964s" podCreationTimestamp="2025-11-24 13:30:01 +0000 UTC" firstStartedPulling="2025-11-24 13:30:02.638227332 +0000 UTC m=+715.077351832" lastFinishedPulling="2025-11-24 13:30:13.419092034 +0000 UTC m=+725.858216574" observedRunningTime="2025-11-24 13:30:13.726984274 +0000 UTC m=+726.166108764" watchObservedRunningTime="2025-11-24 13:30:13.73303964 +0000 UTC m=+726.172164140" Nov 24 13:30:14 crc kubenswrapper[5039]: I1124 13:30:14.718947 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb" event={"ID":"e97f0fac-4f42-4ea9-b853-33c7aedeba68","Type":"ContainerStarted","Data":"7f6511967ae152bd4ae096839e081a832599478715aedb89d8aca8b40e12a449"} Nov 24 13:30:14 crc kubenswrapper[5039]: I1124 13:30:14.750699 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb" 
podStartSLOduration=1.830559796 podStartE2EDuration="14.75067246s" podCreationTimestamp="2025-11-24 13:30:00 +0000 UTC" firstStartedPulling="2025-11-24 13:30:00.630909405 +0000 UTC m=+713.070033905" lastFinishedPulling="2025-11-24 13:30:13.551022069 +0000 UTC m=+725.990146569" observedRunningTime="2025-11-24 13:30:14.742250118 +0000 UTC m=+727.181374638" watchObservedRunningTime="2025-11-24 13:30:14.75067246 +0000 UTC m=+727.189796970" Nov 24 13:30:15 crc kubenswrapper[5039]: I1124 13:30:15.726624 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb" Nov 24 13:30:15 crc kubenswrapper[5039]: I1124 13:30:15.728382 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators-redhat/loki-operator-controller-manager-dfbf69d45-vngzb" Nov 24 13:30:20 crc kubenswrapper[5039]: I1124 13:30:20.101896 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:30:20 crc kubenswrapper[5039]: I1124 13:30:20.102476 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:30:22 crc kubenswrapper[5039]: I1124 13:30:22.524020 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["minio-dev/minio"] Nov 24 13:30:22 crc kubenswrapper[5039]: E1124 13:30:22.524280 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8b02714-17dd-49f0-8cbe-2c61d3123d77" containerName="collect-profiles" Nov 24 13:30:22 crc kubenswrapper[5039]: I1124 13:30:22.524298 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8b02714-17dd-49f0-8cbe-2c61d3123d77" containerName="collect-profiles" Nov 24 13:30:22 crc kubenswrapper[5039]: I1124 13:30:22.524419 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8b02714-17dd-49f0-8cbe-2c61d3123d77" containerName="collect-profiles" Nov 24 13:30:22 crc kubenswrapper[5039]: I1124 13:30:22.524834 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="minio-dev/minio" Nov 24 13:30:22 crc kubenswrapper[5039]: I1124 13:30:22.528056 5039 reflector.go:368] Caches populated for *v1.Secret from object-"minio-dev"/"default-dockercfg-86dgn" Nov 24 13:30:22 crc kubenswrapper[5039]: I1124 13:30:22.528125 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"openshift-service-ca.crt" Nov 24 13:30:22 crc kubenswrapper[5039]: I1124 13:30:22.528316 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"kube-root-ca.crt" Nov 24 13:30:22 crc kubenswrapper[5039]: I1124 13:30:22.549603 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"] Nov 24 13:30:22 crc kubenswrapper[5039]: I1124 13:30:22.654479 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqcdg\" (UniqueName: \"kubernetes.io/projected/d4625c70-00ec-4ece-848a-4562511cff6b-kube-api-access-xqcdg\") pod \"minio\" (UID: \"d4625c70-00ec-4ece-848a-4562511cff6b\") " pod="minio-dev/minio" Nov 24 13:30:22 crc kubenswrapper[5039]: I1124 13:30:22.654617 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-c81f752e-2504-4e6c-8a1a-a82b6e6d2a67\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c81f752e-2504-4e6c-8a1a-a82b6e6d2a67\") pod \"minio\" (UID: \"d4625c70-00ec-4ece-848a-4562511cff6b\") " pod="minio-dev/minio" Nov 24 13:30:22 crc kubenswrapper[5039]: I1124 13:30:22.755494 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-c81f752e-2504-4e6c-8a1a-a82b6e6d2a67\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c81f752e-2504-4e6c-8a1a-a82b6e6d2a67\") pod \"minio\" (UID: \"d4625c70-00ec-4ece-848a-4562511cff6b\") " pod="minio-dev/minio" Nov 24 13:30:22 crc kubenswrapper[5039]: I1124 13:30:22.755616 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqcdg\" (UniqueName: \"kubernetes.io/projected/d4625c70-00ec-4ece-848a-4562511cff6b-kube-api-access-xqcdg\") pod \"minio\" (UID: \"d4625c70-00ec-4ece-848a-4562511cff6b\") " pod="minio-dev/minio" Nov 24 13:30:22 crc kubenswrapper[5039]: I1124 13:30:22.779751 5039 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 24 13:30:22 crc kubenswrapper[5039]: I1124 13:30:22.779799 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-c81f752e-2504-4e6c-8a1a-a82b6e6d2a67\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c81f752e-2504-4e6c-8a1a-a82b6e6d2a67\") pod \"minio\" (UID: \"d4625c70-00ec-4ece-848a-4562511cff6b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/853ee7654cdd859558904f2eb5a36a749238cf00f9ec249dfbc806cbfadaf739/globalmount\"" pod="minio-dev/minio" Nov 24 13:30:22 crc kubenswrapper[5039]: I1124 13:30:22.800693 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqcdg\" (UniqueName: \"kubernetes.io/projected/d4625c70-00ec-4ece-848a-4562511cff6b-kube-api-access-xqcdg\") pod \"minio\" (UID: \"d4625c70-00ec-4ece-848a-4562511cff6b\") " pod="minio-dev/minio" Nov 24 13:30:22 crc kubenswrapper[5039]: I1124 13:30:22.898243 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-c81f752e-2504-4e6c-8a1a-a82b6e6d2a67\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c81f752e-2504-4e6c-8a1a-a82b6e6d2a67\") pod \"minio\" (UID: \"d4625c70-00ec-4ece-848a-4562511cff6b\") " pod="minio-dev/minio" Nov 24 13:30:23 crc kubenswrapper[5039]: I1124 13:30:23.152138 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio" Nov 24 13:30:23 crc kubenswrapper[5039]: I1124 13:30:23.607623 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"] Nov 24 13:30:23 crc kubenswrapper[5039]: I1124 13:30:23.770862 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"d4625c70-00ec-4ece-848a-4562511cff6b","Type":"ContainerStarted","Data":"166823f1ae135531b7a218ddca1aa2d87971042003509d9181a7aebfedd84b9a"} Nov 24 13:30:27 crc kubenswrapper[5039]: I1124 13:30:27.797954 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"d4625c70-00ec-4ece-848a-4562511cff6b","Type":"ContainerStarted","Data":"c8882097ebae2430cf55f0a8b97b36a4085c44358f7804cc14c5dcbcf14d3056"} Nov 24 13:30:27 crc kubenswrapper[5039]: I1124 13:30:27.815115 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="minio-dev/minio" podStartSLOduration=5.7756063 podStartE2EDuration="8.815093679s" podCreationTimestamp="2025-11-24 13:30:19 +0000 UTC" firstStartedPulling="2025-11-24 13:30:23.615637534 +0000 UTC m=+736.054762054" lastFinishedPulling="2025-11-24 13:30:26.655124923 +0000 UTC m=+739.094249433" observedRunningTime="2025-11-24 13:30:27.814994887 +0000 UTC m=+740.254119417" watchObservedRunningTime="2025-11-24 13:30:27.815093679 +0000 UTC m=+740.254218189" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.051751 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7"] Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.053009 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.057639 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-dockercfg-8gvbc" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.058011 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-grpc" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.058320 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-ca-bundle" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.058733 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-http" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.059040 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-config" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.063971 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7"] Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.195403 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/37a01398-aa18-423a-8fa0-b3d1f5fe0cfd-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-xwwg7\" (UID: \"37a01398-aa18-423a-8fa0-b3d1f5fe0cfd\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.195471 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhxtx\" (UniqueName: \"kubernetes.io/projected/37a01398-aa18-423a-8fa0-b3d1f5fe0cfd-kube-api-access-hhxtx\") pod \"logging-loki-distributor-76cc67bf56-xwwg7\" (UID: \"37a01398-aa18-423a-8fa0-b3d1f5fe0cfd\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.195580 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/37a01398-aa18-423a-8fa0-b3d1f5fe0cfd-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-xwwg7\" (UID: \"37a01398-aa18-423a-8fa0-b3d1f5fe0cfd\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.195643 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37a01398-aa18-423a-8fa0-b3d1f5fe0cfd-config\") pod \"logging-loki-distributor-76cc67bf56-xwwg7\" (UID: \"37a01398-aa18-423a-8fa0-b3d1f5fe0cfd\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.195710 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37a01398-aa18-423a-8fa0-b3d1f5fe0cfd-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-xwwg7\" (UID: \"37a01398-aa18-423a-8fa0-b3d1f5fe0cfd\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.197861 5039 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-4xrbf"] Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.198595 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.200294 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-http" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.200570 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-grpc" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.209232 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-4xrbf"] Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.211012 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-s3" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.264116 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s"] Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.265113 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.267036 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-http" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.268096 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-grpc" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.281715 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s"] Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.296689 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3453a5d-07da-4391-a2d5-df5154962b61-config\") pod \"logging-loki-querier-5895d59bb8-4xrbf\" (UID: \"f3453a5d-07da-4391-a2d5-df5154962b61\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.296764 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37a01398-aa18-423a-8fa0-b3d1f5fe0cfd-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-xwwg7\" (UID: \"37a01398-aa18-423a-8fa0-b3d1f5fe0cfd\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.296804 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fz4jt\" (UniqueName: \"kubernetes.io/projected/f3453a5d-07da-4391-a2d5-df5154962b61-kube-api-access-fz4jt\") pod \"logging-loki-querier-5895d59bb8-4xrbf\" (UID: \"f3453a5d-07da-4391-a2d5-df5154962b61\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.296863 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/f3453a5d-07da-4391-a2d5-df5154962b61-logging-loki-querier-http\") pod 
\"logging-loki-querier-5895d59bb8-4xrbf\" (UID: \"f3453a5d-07da-4391-a2d5-df5154962b61\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.296932 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/37a01398-aa18-423a-8fa0-b3d1f5fe0cfd-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-xwwg7\" (UID: \"37a01398-aa18-423a-8fa0-b3d1f5fe0cfd\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.296956 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhxtx\" (UniqueName: \"kubernetes.io/projected/37a01398-aa18-423a-8fa0-b3d1f5fe0cfd-kube-api-access-hhxtx\") pod \"logging-loki-distributor-76cc67bf56-xwwg7\" (UID: \"37a01398-aa18-423a-8fa0-b3d1f5fe0cfd\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.297026 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/f3453a5d-07da-4391-a2d5-df5154962b61-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-4xrbf\" (UID: \"f3453a5d-07da-4391-a2d5-df5154962b61\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.297050 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/37a01398-aa18-423a-8fa0-b3d1f5fe0cfd-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-xwwg7\" (UID: \"37a01398-aa18-423a-8fa0-b3d1f5fe0cfd\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.297098 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/f3453a5d-07da-4391-a2d5-df5154962b61-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-4xrbf\" (UID: \"f3453a5d-07da-4391-a2d5-df5154962b61\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.297137 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37a01398-aa18-423a-8fa0-b3d1f5fe0cfd-config\") pod \"logging-loki-distributor-76cc67bf56-xwwg7\" (UID: \"37a01398-aa18-423a-8fa0-b3d1f5fe0cfd\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.297195 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f3453a5d-07da-4391-a2d5-df5154962b61-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-4xrbf\" (UID: \"f3453a5d-07da-4391-a2d5-df5154962b61\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.297999 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37a01398-aa18-423a-8fa0-b3d1f5fe0cfd-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-xwwg7\" (UID: 
\"37a01398-aa18-423a-8fa0-b3d1f5fe0cfd\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.298345 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37a01398-aa18-423a-8fa0-b3d1f5fe0cfd-config\") pod \"logging-loki-distributor-76cc67bf56-xwwg7\" (UID: \"37a01398-aa18-423a-8fa0-b3d1f5fe0cfd\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.322312 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/37a01398-aa18-423a-8fa0-b3d1f5fe0cfd-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-xwwg7\" (UID: \"37a01398-aa18-423a-8fa0-b3d1f5fe0cfd\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.334651 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/37a01398-aa18-423a-8fa0-b3d1f5fe0cfd-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-xwwg7\" (UID: \"37a01398-aa18-423a-8fa0-b3d1f5fe0cfd\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.335059 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhxtx\" (UniqueName: \"kubernetes.io/projected/37a01398-aa18-423a-8fa0-b3d1f5fe0cfd-kube-api-access-hhxtx\") pod \"logging-loki-distributor-76cc67bf56-xwwg7\" (UID: \"37a01398-aa18-423a-8fa0-b3d1f5fe0cfd\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.378528 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg"] Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.379750 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.383074 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.383349 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-client-http" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.383875 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-http" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.383885 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.385022 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj"] Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.386216 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.389237 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway-ca-bundle" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.389545 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-dockercfg-4kz9t" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.398779 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0c93d652-05e2-4359-b5f6-6951e26ba7d2-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-qvz9s\" (UID: \"0c93d652-05e2-4359-b5f6-6951e26ba7d2\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.398835 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/f3453a5d-07da-4391-a2d5-df5154962b61-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-4xrbf\" (UID: \"f3453a5d-07da-4391-a2d5-df5154962b61\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.398867 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/f3453a5d-07da-4391-a2d5-df5154962b61-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-4xrbf\" (UID: \"f3453a5d-07da-4391-a2d5-df5154962b61\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.398897 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c93d652-05e2-4359-b5f6-6951e26ba7d2-config\") pod \"logging-loki-query-frontend-84558f7c9f-qvz9s\" (UID: \"0c93d652-05e2-4359-b5f6-6951e26ba7d2\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.398929 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lshhd\" (UniqueName: \"kubernetes.io/projected/0c93d652-05e2-4359-b5f6-6951e26ba7d2-kube-api-access-lshhd\") pod \"logging-loki-query-frontend-84558f7c9f-qvz9s\" (UID: \"0c93d652-05e2-4359-b5f6-6951e26ba7d2\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.398965 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f3453a5d-07da-4391-a2d5-df5154962b61-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-4xrbf\" (UID: \"f3453a5d-07da-4391-a2d5-df5154962b61\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.398995 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3453a5d-07da-4391-a2d5-df5154962b61-config\") pod \"logging-loki-querier-5895d59bb8-4xrbf\" (UID: \"f3453a5d-07da-4391-a2d5-df5154962b61\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 
13:30:32.399026 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fz4jt\" (UniqueName: \"kubernetes.io/projected/f3453a5d-07da-4391-a2d5-df5154962b61-kube-api-access-fz4jt\") pod \"logging-loki-querier-5895d59bb8-4xrbf\" (UID: \"f3453a5d-07da-4391-a2d5-df5154962b61\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.399064 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/f3453a5d-07da-4391-a2d5-df5154962b61-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-4xrbf\" (UID: \"f3453a5d-07da-4391-a2d5-df5154962b61\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.399086 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/0c93d652-05e2-4359-b5f6-6951e26ba7d2-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-qvz9s\" (UID: \"0c93d652-05e2-4359-b5f6-6951e26ba7d2\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.399128 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/0c93d652-05e2-4359-b5f6-6951e26ba7d2-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-qvz9s\" (UID: \"0c93d652-05e2-4359-b5f6-6951e26ba7d2\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.402303 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f3453a5d-07da-4391-a2d5-df5154962b61-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-4xrbf\" (UID: \"f3453a5d-07da-4391-a2d5-df5154962b61\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.403030 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3453a5d-07da-4391-a2d5-df5154962b61-config\") pod \"logging-loki-querier-5895d59bb8-4xrbf\" (UID: \"f3453a5d-07da-4391-a2d5-df5154962b61\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.405105 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/f3453a5d-07da-4391-a2d5-df5154962b61-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-4xrbf\" (UID: \"f3453a5d-07da-4391-a2d5-df5154962b61\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.407090 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.417371 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/f3453a5d-07da-4391-a2d5-df5154962b61-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-4xrbf\" (UID: \"f3453a5d-07da-4391-a2d5-df5154962b61\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.417487 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/f3453a5d-07da-4391-a2d5-df5154962b61-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-4xrbf\" (UID: \"f3453a5d-07da-4391-a2d5-df5154962b61\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.423930 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fz4jt\" (UniqueName: \"kubernetes.io/projected/f3453a5d-07da-4391-a2d5-df5154962b61-kube-api-access-fz4jt\") pod \"logging-loki-querier-5895d59bb8-4xrbf\" (UID: \"f3453a5d-07da-4391-a2d5-df5154962b61\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.425026 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg"] Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.431078 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj"] Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.500333 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwsvh\" (UniqueName: \"kubernetes.io/projected/f025c7ee-097c-4915-9946-41b57f995f0d-kube-api-access-zwsvh\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.500397 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/0c93d652-05e2-4359-b5f6-6951e26ba7d2-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-qvz9s\" (UID: \"0c93d652-05e2-4359-b5f6-6951e26ba7d2\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.501080 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/0c93d652-05e2-4359-b5f6-6951e26ba7d2-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-qvz9s\" (UID: \"0c93d652-05e2-4359-b5f6-6951e26ba7d2\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.501107 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/c24389d6-c229-4c2b-9933-61cd5f9b81d3-tls-secret\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg" Nov 24 13:30:32 crc 
kubenswrapper[5039]: I1124 13:30:32.501461 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/f025c7ee-097c-4915-9946-41b57f995f0d-tenants\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.501809 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/f025c7ee-097c-4915-9946-41b57f995f0d-tls-secret\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.501827 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/c24389d6-c229-4c2b-9933-61cd5f9b81d3-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.501858 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0c93d652-05e2-4359-b5f6-6951e26ba7d2-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-qvz9s\" (UID: \"0c93d652-05e2-4359-b5f6-6951e26ba7d2\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.501879 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwddl\" (UniqueName: \"kubernetes.io/projected/c24389d6-c229-4c2b-9933-61cd5f9b81d3-kube-api-access-rwddl\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.501898 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/f025c7ee-097c-4915-9946-41b57f995f0d-rbac\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.501926 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f025c7ee-097c-4915-9946-41b57f995f0d-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.501942 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c93d652-05e2-4359-b5f6-6951e26ba7d2-config\") pod \"logging-loki-query-frontend-84558f7c9f-qvz9s\" (UID: \"0c93d652-05e2-4359-b5f6-6951e26ba7d2\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.501963 5039 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/f025c7ee-097c-4915-9946-41b57f995f0d-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.501980 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lshhd\" (UniqueName: \"kubernetes.io/projected/0c93d652-05e2-4359-b5f6-6951e26ba7d2-kube-api-access-lshhd\") pod \"logging-loki-query-frontend-84558f7c9f-qvz9s\" (UID: \"0c93d652-05e2-4359-b5f6-6951e26ba7d2\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.501999 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f025c7ee-097c-4915-9946-41b57f995f0d-logging-loki-ca-bundle\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.502014 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c24389d6-c229-4c2b-9933-61cd5f9b81d3-logging-loki-ca-bundle\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.502050 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/f025c7ee-097c-4915-9946-41b57f995f0d-lokistack-gateway\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.502068 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/c24389d6-c229-4c2b-9933-61cd5f9b81d3-lokistack-gateway\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.502085 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/c24389d6-c229-4c2b-9933-61cd5f9b81d3-rbac\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.502104 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/c24389d6-c229-4c2b-9933-61cd5f9b81d3-tenants\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.502129 5039 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c24389d6-c229-4c2b-9933-61cd5f9b81d3-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.502917 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0c93d652-05e2-4359-b5f6-6951e26ba7d2-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-qvz9s\" (UID: \"0c93d652-05e2-4359-b5f6-6951e26ba7d2\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.505304 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c93d652-05e2-4359-b5f6-6951e26ba7d2-config\") pod \"logging-loki-query-frontend-84558f7c9f-qvz9s\" (UID: \"0c93d652-05e2-4359-b5f6-6951e26ba7d2\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.506836 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/0c93d652-05e2-4359-b5f6-6951e26ba7d2-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-qvz9s\" (UID: \"0c93d652-05e2-4359-b5f6-6951e26ba7d2\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.511183 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/0c93d652-05e2-4359-b5f6-6951e26ba7d2-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-qvz9s\" (UID: \"0c93d652-05e2-4359-b5f6-6951e26ba7d2\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.514265 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.521542 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lshhd\" (UniqueName: \"kubernetes.io/projected/0c93d652-05e2-4359-b5f6-6951e26ba7d2-kube-api-access-lshhd\") pod \"logging-loki-query-frontend-84558f7c9f-qvz9s\" (UID: \"0c93d652-05e2-4359-b5f6-6951e26ba7d2\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s" Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.579480 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.603078 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/f025c7ee-097c-4915-9946-41b57f995f0d-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.603124 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f025c7ee-097c-4915-9946-41b57f995f0d-logging-loki-ca-bundle\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.603145 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c24389d6-c229-4c2b-9933-61cd5f9b81d3-logging-loki-ca-bundle\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.603171 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/f025c7ee-097c-4915-9946-41b57f995f0d-lokistack-gateway\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.603191 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/c24389d6-c229-4c2b-9933-61cd5f9b81d3-lokistack-gateway\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.603209 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/c24389d6-c229-4c2b-9933-61cd5f9b81d3-rbac\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.603226 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/c24389d6-c229-4c2b-9933-61cd5f9b81d3-tenants\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.603252 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c24389d6-c229-4c2b-9933-61cd5f9b81d3-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.603270 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwsvh\" (UniqueName: \"kubernetes.io/projected/f025c7ee-097c-4915-9946-41b57f995f0d-kube-api-access-zwsvh\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.603294 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/f025c7ee-097c-4915-9946-41b57f995f0d-tenants\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.603307 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/c24389d6-c229-4c2b-9933-61cd5f9b81d3-tls-secret\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.603336 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/f025c7ee-097c-4915-9946-41b57f995f0d-tls-secret\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.603351 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/c24389d6-c229-4c2b-9933-61cd5f9b81d3-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.603374 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwddl\" (UniqueName: \"kubernetes.io/projected/c24389d6-c229-4c2b-9933-61cd5f9b81d3-kube-api-access-rwddl\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.603392 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/f025c7ee-097c-4915-9946-41b57f995f0d-rbac\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.603413 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f025c7ee-097c-4915-9946-41b57f995f0d-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.604527 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c24389d6-c229-4c2b-9933-61cd5f9b81d3-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.605475 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/c24389d6-c229-4c2b-9933-61cd5f9b81d3-rbac\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.605737 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/f025c7ee-097c-4915-9946-41b57f995f0d-lokistack-gateway\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.606096 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f025c7ee-097c-4915-9946-41b57f995f0d-logging-loki-ca-bundle\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.606110 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/c24389d6-c229-4c2b-9933-61cd5f9b81d3-lokistack-gateway\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.606397 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c24389d6-c229-4c2b-9933-61cd5f9b81d3-logging-loki-ca-bundle\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.607937 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/f025c7ee-097c-4915-9946-41b57f995f0d-rbac\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.608185 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/f025c7ee-097c-4915-9946-41b57f995f0d-tls-secret\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.608783 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f025c7ee-097c-4915-9946-41b57f995f0d-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.609294 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/f025c7ee-097c-4915-9946-41b57f995f0d-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.611236 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/c24389d6-c229-4c2b-9933-61cd5f9b81d3-tenants\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.611335 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/c24389d6-c229-4c2b-9933-61cd5f9b81d3-tls-secret\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.618327 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/c24389d6-c229-4c2b-9933-61cd5f9b81d3-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.619961 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/f025c7ee-097c-4915-9946-41b57f995f0d-tenants\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.628354 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwsvh\" (UniqueName: \"kubernetes.io/projected/f025c7ee-097c-4915-9946-41b57f995f0d-kube-api-access-zwsvh\") pod \"logging-loki-gateway-797bc7dfc5-fjsvj\" (UID: \"f025c7ee-097c-4915-9946-41b57f995f0d\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.631242 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwddl\" (UniqueName: \"kubernetes.io/projected/c24389d6-c229-4c2b-9933-61cd5f9b81d3-kube-api-access-rwddl\") pod \"logging-loki-gateway-797bc7dfc5-zd8bg\" (UID: \"c24389d6-c229-4c2b-9933-61cd5f9b81d3\") " pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.633702 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7"]
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.717772 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.762916 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj"
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.857050 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" event={"ID":"37a01398-aa18-423a-8fa0-b3d1f5fe0cfd","Type":"ContainerStarted","Data":"601d3fdfc4c602a90eebe1323467042162db5402f82b0f9798e410cd6bde9995"}
Nov 24 13:30:32 crc kubenswrapper[5039]: I1124 13:30:32.970497 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-4xrbf"]
Nov 24 13:30:32 crc kubenswrapper[5039]: W1124 13:30:32.986771 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf3453a5d_07da_4391_a2d5_df5154962b61.slice/crio-de24b73b23cbf00b5ed93a25887aacefb74ed60aee8ec281ce1919b8a786a779 WatchSource:0}: Error finding container de24b73b23cbf00b5ed93a25887aacefb74ed60aee8ec281ce1919b8a786a779: Status 404 returned error can't find the container with id de24b73b23cbf00b5ed93a25887aacefb74ed60aee8ec281ce1919b8a786a779
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.105852 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s"]
Nov 24 13:30:33 crc kubenswrapper[5039]: W1124 13:30:33.107071 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0c93d652_05e2_4359_b5f6_6951e26ba7d2.slice/crio-0c34cf1c3db78d531e8da9fe3d23e2295a6a4ac70d6b1b00a5ede2bd1f5528eb WatchSource:0}: Error finding container 0c34cf1c3db78d531e8da9fe3d23e2295a6a4ac70d6b1b00a5ede2bd1f5528eb: Status 404 returned error can't find the container with id 0c34cf1c3db78d531e8da9fe3d23e2295a6a4ac70d6b1b00a5ede2bd1f5528eb
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.129781 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg"]
Nov 24 13:30:33 crc kubenswrapper[5039]: W1124 13:30:33.143262 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc24389d6_c229_4c2b_9933_61cd5f9b81d3.slice/crio-d7da5746ab3abbc54dbf9c63fefb4b2e5757da21ce7dfb9ecfddb6a90e732451 WatchSource:0}: Error finding container d7da5746ab3abbc54dbf9c63fefb4b2e5757da21ce7dfb9ecfddb6a90e732451: Status 404 returned error can't find the container with id d7da5746ab3abbc54dbf9c63fefb4b2e5757da21ce7dfb9ecfddb6a90e732451
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.151855 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj"]
Nov 24 13:30:33 crc kubenswrapper[5039]: W1124 13:30:33.155579 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf025c7ee_097c_4915_9946_41b57f995f0d.slice/crio-3d6d554ec6a139c4ce28ac7300bcbcb2fcbdb5820c7f638c5dd9b111d8dfa492 WatchSource:0}: Error finding container 3d6d554ec6a139c4ce28ac7300bcbcb2fcbdb5820c7f638c5dd9b111d8dfa492: Status 404 returned error can't find the container with id 3d6d554ec6a139c4ce28ac7300bcbcb2fcbdb5820c7f638c5dd9b111d8dfa492
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.233599 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-ingester-0"]
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.234340 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.236815 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-http"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.237157 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-grpc"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.245223 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"]
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.269687 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-compactor-0"]
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.270866 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.279782 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-http"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.280453 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-grpc"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.296579 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"]
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.332273 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"]
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.333196 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.338148 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-http"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.338770 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-grpc"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.341773 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"]
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.413345 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/a1e6b0b7-32a0-465a-a329-d060dbf0b8f9-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.413398 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/a1e6b0b7-32a0-465a-a329-d060dbf0b8f9-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.413458 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1e6b0b7-32a0-465a-a329-d060dbf0b8f9-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.413524 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-ec79467d-fe89-428c-9247-6fb16e86874e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ec79467d-fe89-428c-9247-6fb16e86874e\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.413567 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/ffdab614-73c1-4ac9-adba-d2ec7ce81550-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.413714 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-37d0beca-60d2-4533-adc3-9121de16d6c8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-37d0beca-60d2-4533-adc3-9121de16d6c8\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.413788 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1e6b0b7-32a0-465a-a329-d060dbf0b8f9-config\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.413826 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/a1e6b0b7-32a0-465a-a329-d060dbf0b8f9-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.413854 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffdab614-73c1-4ac9-adba-d2ec7ce81550-config\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.413874 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-36d2572d-c048-4cb1-bea2-50b61e3d7221\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-36d2572d-c048-4cb1-bea2-50b61e3d7221\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.413901 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/ffdab614-73c1-4ac9-adba-d2ec7ce81550-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.413917 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/ffdab614-73c1-4ac9-adba-d2ec7ce81550-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.413950 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ffdab614-73c1-4ac9-adba-d2ec7ce81550-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.413967 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvtkx\" (UniqueName: \"kubernetes.io/projected/ffdab614-73c1-4ac9-adba-d2ec7ce81550-kube-api-access-zvtkx\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.413987 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52scb\" (UniqueName: \"kubernetes.io/projected/a1e6b0b7-32a0-465a-a329-d060dbf0b8f9-kube-api-access-52scb\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.515920 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52scb\" (UniqueName: \"kubernetes.io/projected/a1e6b0b7-32a0-465a-a329-d060dbf0b8f9-kube-api-access-52scb\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516346 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/a1e6b0b7-32a0-465a-a329-d060dbf0b8f9-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516372 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/a1e6b0b7-32a0-465a-a329-d060dbf0b8f9-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516395 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-0a90d5da-0e2c-4be1-bdcd-c69cfcf22f1b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0a90d5da-0e2c-4be1-bdcd-c69cfcf22f1b\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516428 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1e6b0b7-32a0-465a-a329-d060dbf0b8f9-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516450 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-ec79467d-fe89-428c-9247-6fb16e86874e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ec79467d-fe89-428c-9247-6fb16e86874e\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516477 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/ffdab614-73c1-4ac9-adba-d2ec7ce81550-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516518 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvct5\" (UniqueName: \"kubernetes.io/projected/771e7704-d64a-4536-adfb-2ca0e6356956-kube-api-access-mvct5\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516541 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/771e7704-d64a-4536-adfb-2ca0e6356956-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516563 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-37d0beca-60d2-4533-adc3-9121de16d6c8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-37d0beca-60d2-4533-adc3-9121de16d6c8\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516610 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/771e7704-d64a-4536-adfb-2ca0e6356956-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516645 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1e6b0b7-32a0-465a-a329-d060dbf0b8f9-config\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516666 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/a1e6b0b7-32a0-465a-a329-d060dbf0b8f9-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516686 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/771e7704-d64a-4536-adfb-2ca0e6356956-config\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516702 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffdab614-73c1-4ac9-adba-d2ec7ce81550-config\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516719 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-36d2572d-c048-4cb1-bea2-50b61e3d7221\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-36d2572d-c048-4cb1-bea2-50b61e3d7221\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516735 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/ffdab614-73c1-4ac9-adba-d2ec7ce81550-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516754 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/771e7704-d64a-4536-adfb-2ca0e6356956-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516771 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/ffdab614-73c1-4ac9-adba-d2ec7ce81550-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516787 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/771e7704-d64a-4536-adfb-2ca0e6356956-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516823 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ffdab614-73c1-4ac9-adba-d2ec7ce81550-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.516842 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvtkx\" (UniqueName: \"kubernetes.io/projected/ffdab614-73c1-4ac9-adba-d2ec7ce81550-kube-api-access-zvtkx\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.525829 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/a1e6b0b7-32a0-465a-a329-d060dbf0b8f9-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.527134 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1e6b0b7-32a0-465a-a329-d060dbf0b8f9-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.528263 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/a1e6b0b7-32a0-465a-a329-d060dbf0b8f9-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.528292 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffdab614-73c1-4ac9-adba-d2ec7ce81550-config\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.529293 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ffdab614-73c1-4ac9-adba-d2ec7ce81550-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.529742 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1e6b0b7-32a0-465a-a329-d060dbf0b8f9-config\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.529914 5039 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.529938 5039 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.529985 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-36d2572d-c048-4cb1-bea2-50b61e3d7221\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-36d2572d-c048-4cb1-bea2-50b61e3d7221\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/3f1dfc112a8a79aa324d56f6f3345ac1e1020a27350377ba0f2ae20050d8522f/globalmount\"" pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.529939 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-ec79467d-fe89-428c-9247-6fb16e86874e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ec79467d-fe89-428c-9247-6fb16e86874e\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/16949596f587d7c9431fff6d61e9f6b05f3c419351418a2ee1863606b1d3e8bd/globalmount\"" pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.531003 5039 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.531112 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-37d0beca-60d2-4533-adc3-9121de16d6c8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-37d0beca-60d2-4533-adc3-9121de16d6c8\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/b3e680cac8862ecde849829bacaff5193442b67e4795f0c57d471b5e6d42433b/globalmount\"" pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.532649 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvtkx\" (UniqueName: \"kubernetes.io/projected/ffdab614-73c1-4ac9-adba-d2ec7ce81550-kube-api-access-zvtkx\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.533270 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/ffdab614-73c1-4ac9-adba-d2ec7ce81550-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.533788 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/a1e6b0b7-32a0-465a-a329-d060dbf0b8f9-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.535960 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/ffdab614-73c1-4ac9-adba-d2ec7ce81550-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.541117 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/ffdab614-73c1-4ac9-adba-d2ec7ce81550-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.542208 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52scb\" (UniqueName: \"kubernetes.io/projected/a1e6b0b7-32a0-465a-a329-d060dbf0b8f9-kube-api-access-52scb\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.563561 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-37d0beca-60d2-4533-adc3-9121de16d6c8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-37d0beca-60d2-4533-adc3-9121de16d6c8\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.568053 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-36d2572d-c048-4cb1-bea2-50b61e3d7221\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-36d2572d-c048-4cb1-bea2-50b61e3d7221\") pod \"logging-loki-compactor-0\" (UID: \"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.569450 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-ec79467d-fe89-428c-9247-6fb16e86874e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ec79467d-fe89-428c-9247-6fb16e86874e\") pod \"logging-loki-ingester-0\" (UID: \"ffdab614-73c1-4ac9-adba-d2ec7ce81550\") " pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.595901 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.617832 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/771e7704-d64a-4536-adfb-2ca0e6356956-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.617885 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/771e7704-d64a-4536-adfb-2ca0e6356956-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.617951 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-0a90d5da-0e2c-4be1-bdcd-c69cfcf22f1b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0a90d5da-0e2c-4be1-bdcd-c69cfcf22f1b\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.618010 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvct5\" (UniqueName: \"kubernetes.io/projected/771e7704-d64a-4536-adfb-2ca0e6356956-kube-api-access-mvct5\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.618036 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/771e7704-d64a-4536-adfb-2ca0e6356956-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.618064 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/771e7704-d64a-4536-adfb-2ca0e6356956-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.618095 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/771e7704-d64a-4536-adfb-2ca0e6356956-config\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.618695 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/771e7704-d64a-4536-adfb-2ca0e6356956-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.619676 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/771e7704-d64a-4536-adfb-2ca0e6356956-config\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.620731 5039 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.620760 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-0a90d5da-0e2c-4be1-bdcd-c69cfcf22f1b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0a90d5da-0e2c-4be1-bdcd-c69cfcf22f1b\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e25328fb1f2ea940b22a7aedac91e0c5977f5079b69cb7b9f68084987e02aecc/globalmount\"" pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.621478 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/771e7704-d64a-4536-adfb-2ca0e6356956-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.623534 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/771e7704-d64a-4536-adfb-2ca0e6356956-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.626204 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/771e7704-d64a-4536-adfb-2ca0e6356956-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.640583 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvct5\" (UniqueName: \"kubernetes.io/projected/771e7704-d64a-4536-adfb-2ca0e6356956-kube-api-access-mvct5\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.642373 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-0a90d5da-0e2c-4be1-bdcd-c69cfcf22f1b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0a90d5da-0e2c-4be1-bdcd-c69cfcf22f1b\") pod \"logging-loki-index-gateway-0\" (UID: \"771e7704-d64a-4536-adfb-2ca0e6356956\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.654227 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.857552 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.869315 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg" event={"ID":"c24389d6-c229-4c2b-9933-61cd5f9b81d3","Type":"ContainerStarted","Data":"d7da5746ab3abbc54dbf9c63fefb4b2e5757da21ce7dfb9ecfddb6a90e732451"}
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.870721 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj" event={"ID":"f025c7ee-097c-4915-9946-41b57f995f0d","Type":"ContainerStarted","Data":"3d6d554ec6a139c4ce28ac7300bcbcb2fcbdb5820c7f638c5dd9b111d8dfa492"}
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.871896 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s" event={"ID":"0c93d652-05e2-4359-b5f6-6951e26ba7d2","Type":"ContainerStarted","Data":"0c34cf1c3db78d531e8da9fe3d23e2295a6a4ac70d6b1b00a5ede2bd1f5528eb"}
Nov 24 13:30:33 crc kubenswrapper[5039]: I1124 13:30:33.874624 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" event={"ID":"f3453a5d-07da-4391-a2d5-df5154962b61","Type":"ContainerStarted","Data":"de24b73b23cbf00b5ed93a25887aacefb74ed60aee8ec281ce1919b8a786a779"}
Nov 24 13:30:34 crc kubenswrapper[5039]: I1124 13:30:34.049172 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"]
Nov 24 13:30:34 crc kubenswrapper[5039]: W1124 13:30:34.060215 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda1e6b0b7_32a0_465a_a329_d060dbf0b8f9.slice/crio-64fc98cf3cf09646e432615640bec5fa8ac255d343230a58eed8bd24e768a32c WatchSource:0}: Error finding container 64fc98cf3cf09646e432615640bec5fa8ac255d343230a58eed8bd24e768a32c: Status 404 returned error can't find the container with id 64fc98cf3cf09646e432615640bec5fa8ac255d343230a58eed8bd24e768a32c
Nov 24 13:30:34 crc kubenswrapper[5039]: I1124 13:30:34.125186 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"]
Nov 24 13:30:34 crc kubenswrapper[5039]: I1124 13:30:34.276423 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"]
Nov 24 13:30:34 crc kubenswrapper[5039]: W1124 13:30:34.285287 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podffdab614_73c1_4ac9_adba_d2ec7ce81550.slice/crio-297dc56a9ef1e38d7ace4884f6a7b0d2367cdd03759313d6bbddb6c6617dbebe WatchSource:0}: Error finding container 297dc56a9ef1e38d7ace4884f6a7b0d2367cdd03759313d6bbddb6c6617dbebe: Status 404 returned error can't find the container with id 297dc56a9ef1e38d7ace4884f6a7b0d2367cdd03759313d6bbddb6c6617dbebe
Nov 24 13:30:34 crc kubenswrapper[5039]: I1124 13:30:34.883333 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"771e7704-d64a-4536-adfb-2ca0e6356956","Type":"ContainerStarted","Data":"73a08a62d54c8194ab4681920e0961e6a10ab7377fa7d5f9fa82301d19790dd0"}
Nov 24 13:30:34 crc kubenswrapper[5039]: I1124 13:30:34.884883 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"ffdab614-73c1-4ac9-adba-d2ec7ce81550","Type":"ContainerStarted","Data":"297dc56a9ef1e38d7ace4884f6a7b0d2367cdd03759313d6bbddb6c6617dbebe"}
Nov 24 13:30:34 crc kubenswrapper[5039]: I1124 13:30:34.886491 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9","Type":"ContainerStarted","Data":"64fc98cf3cf09646e432615640bec5fa8ac255d343230a58eed8bd24e768a32c"}
Nov 24 13:30:37 crc kubenswrapper[5039]: I1124 13:30:37.905793 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj" event={"ID":"f025c7ee-097c-4915-9946-41b57f995f0d","Type":"ContainerStarted","Data":"507f2177ad5bc081e9fcc5f9ebeab2b674ef031426ca969d7a9331b4667a5303"}
Nov 24 13:30:37 crc kubenswrapper[5039]: I1124 13:30:37.907397 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s" event={"ID":"0c93d652-05e2-4359-b5f6-6951e26ba7d2","Type":"ContainerStarted","Data":"43cdd34ec10630085c07ce34586dac78d2581e8b4ef04395d4c4fa08d76735f8"}
Nov 24 13:30:37 crc kubenswrapper[5039]: I1124 13:30:37.907826 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s"
Nov 24 13:30:37 crc kubenswrapper[5039]: I1124 13:30:37.908914 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" event={"ID":"f3453a5d-07da-4391-a2d5-df5154962b61","Type":"ContainerStarted","Data":"d080c4ebc400cefeab19d78bcadfb04ea15a03205026cedb150d2072564f1c98"}
Nov 24 13:30:37 crc kubenswrapper[5039]: I1124 13:30:37.909081 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf"
Nov 24 13:30:37 crc kubenswrapper[5039]: I1124 13:30:37.910238 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"771e7704-d64a-4536-adfb-2ca0e6356956","Type":"ContainerStarted","Data":"433e5fa637216c80b10ea29d23a5cc675c0292e0394aa3304e6c374dda212759"}
Nov 24 13:30:37 crc kubenswrapper[5039]: I1124 13:30:37.910337 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-index-gateway-0"
Nov 24 13:30:37 crc kubenswrapper[5039]: I1124 13:30:37.911416 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"ffdab614-73c1-4ac9-adba-d2ec7ce81550","Type":"ContainerStarted","Data":"9be56de4bc129d623ec6d06705efe43bffda0a31c09a5c373730537cfd540efd"}
Nov 24 13:30:37 crc kubenswrapper[5039]: I1124 13:30:37.911561 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:30:37 crc kubenswrapper[5039]: I1124 13:30:37.912738 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg" event={"ID":"c24389d6-c229-4c2b-9933-61cd5f9b81d3","Type":"ContainerStarted","Data":"d6cb19f4c5315b7acba477d4f3acc8f41c28e5b5408b3fa6989bb367b8b17a01"}
Nov 24 13:30:37 crc kubenswrapper[5039]: I1124 13:30:37.914225 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" event={"ID":"37a01398-aa18-423a-8fa0-b3d1f5fe0cfd","Type":"ContainerStarted","Data":"182210aeea0b6520afe51617ef26a8b98744d6a5494dbfe0b9e2190d65da6412"}
Nov 24 13:30:37 crc kubenswrapper[5039]: I1124 13:30:37.914263 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7"
Nov 24 13:30:37 crc kubenswrapper[5039]: I1124 13:30:37.915365 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"a1e6b0b7-32a0-465a-a329-d060dbf0b8f9","Type":"ContainerStarted","Data":"b65b7d642f6fd5c9c3f1ad9c96bd30e5da780efe77efd3777bd42f97f3958baf"}
Nov 24 13:30:37 crc kubenswrapper[5039]: I1124 13:30:37.915618 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-compactor-0"
Nov 24 13:30:37 crc kubenswrapper[5039]: I1124 13:30:37.928039 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s" podStartSLOduration=2.026132693 podStartE2EDuration="5.928020156s" podCreationTimestamp="2025-11-24 13:30:32 +0000 UTC" firstStartedPulling="2025-11-24 13:30:33.11017171 +0000 UTC m=+745.549296210" lastFinishedPulling="2025-11-24 13:30:37.012059173 +0000 UTC m=+749.451183673" observedRunningTime="2025-11-24 13:30:37.925426094 +0000 UTC m=+750.364550594" watchObservedRunningTime="2025-11-24 13:30:37.928020156 +0000 UTC m=+750.367144656"
Nov 24 13:30:37 crc kubenswrapper[5039]: I1124 13:30:37.951575 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-ingester-0" podStartSLOduration=3.172883871 podStartE2EDuration="5.951556193s" podCreationTimestamp="2025-11-24 13:30:32 +0000 UTC" firstStartedPulling="2025-11-24 13:30:34.287391751 +0000 UTC m=+746.726516251" lastFinishedPulling="2025-11-24 13:30:37.066064073 +0000 UTC m=+749.505188573" observedRunningTime="2025-11-24 13:30:37.944088033 +0000 UTC m=+750.383212533" watchObservedRunningTime="2025-11-24 13:30:37.951556193 +0000 UTC m=+750.390680693"
Nov 24 13:30:37 crc kubenswrapper[5039]: I1124 13:30:37.964954 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" podStartSLOduration=1.982571026 podStartE2EDuration="5.964939315s" podCreationTimestamp="2025-11-24 13:30:32 +0000 UTC" firstStartedPulling="2025-11-24 13:30:32.988685997 +0000 UTC m=+745.427810497" lastFinishedPulling="2025-11-24 13:30:36.971054276 +0000 UTC m=+749.410178786" observedRunningTime="2025-11-24 13:30:37.961540463 +0000 UTC m=+750.400664953" watchObservedRunningTime="2025-11-24 13:30:37.964939315 +0000 UTC m=+750.404063805"
Nov 24 13:30:37 crc kubenswrapper[5039]: I1124 13:30:37.979297 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-index-gateway-0" podStartSLOduration=3.034332046 podStartE2EDuration="5.97927879s" podCreationTimestamp="2025-11-24 13:30:32 +0000 UTC" firstStartedPulling="2025-11-24 13:30:34.141902119 +0000 UTC m=+746.581026619" lastFinishedPulling="2025-11-24 13:30:37.086848853 +0000 UTC m=+749.525973363" observedRunningTime="2025-11-24 13:30:37.976710598 +0000 UTC m=+750.415835098" watchObservedRunningTime="2025-11-24 13:30:37.97927879 +0000 UTC m=+750.418403290"
Nov 24 13:30:37 crc kubenswrapper[5039]: I1124 13:30:37.993099 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" podStartSLOduration=1.5832547350000001 podStartE2EDuration="5.993086762s" podCreationTimestamp="2025-11-24 13:30:32 +0000 UTC" firstStartedPulling="2025-11-24 13:30:32.648637603 +0000 UTC m=+745.087762103" lastFinishedPulling="2025-11-24 13:30:37.05846963 +0000 UTC m=+749.497594130" observedRunningTime="2025-11-24 13:30:37.989930397 +0000 UTC m=+750.429054897" watchObservedRunningTime="2025-11-24 13:30:37.993086762 +0000 UTC m=+750.432211262"
Nov 24 13:30:38 crc kubenswrapper[5039]: I1124 13:30:38.020311 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-compactor-0" podStartSLOduration=3.018897925 podStartE2EDuration="6.020294217s" podCreationTimestamp="2025-11-24 13:30:32 +0000 UTC" firstStartedPulling="2025-11-24 13:30:34.064382384 +0000 UTC m=+746.503506884" lastFinishedPulling="2025-11-24 13:30:37.065778676 +0000 UTC m=+749.504903176" observedRunningTime="2025-11-24 13:30:38.019837456 +0000 UTC m=+750.458961956" watchObservedRunningTime="2025-11-24 13:30:38.020294217 +0000 UTC m=+750.459418717"
Nov 24 13:30:38 crc kubenswrapper[5039]: I1124 13:30:38.717138 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xgtj9"]
Nov 24 13:30:38 crc kubenswrapper[5039]: I1124 13:30:38.717376 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" podUID="0883a675-24bc-4d9b-b318-feee05e49135" containerName="controller-manager" containerID="cri-o://b4b232d3808308ce64c298612516c0fbb2bad72e3398b5416091ea205cc3d8e2" gracePeriod=30
Nov 24 13:30:38 crc kubenswrapper[5039]: I1124 13:30:38.729111 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7"]
Nov 24 13:30:38 crc kubenswrapper[5039]: I1124 13:30:38.729411 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7" podUID="a6decbe9-edda-413b-b067-665ccf6efece" containerName="route-controller-manager" containerID="cri-o://5b0e38068a8f2cc4709f6aaa0e8433c63c1c0fabbb4ea4702466f9e1c5c81a57" gracePeriod=30
Nov 24 13:30:38 crc kubenswrapper[5039]: I1124 13:30:38.921011 5039 generic.go:334] "Generic (PLEG): container finished" podID="0883a675-24bc-4d9b-b318-feee05e49135" containerID="b4b232d3808308ce64c298612516c0fbb2bad72e3398b5416091ea205cc3d8e2" exitCode=0
Nov 24 13:30:38 crc kubenswrapper[5039]: I1124 13:30:38.921070 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" event={"ID":"0883a675-24bc-4d9b-b318-feee05e49135","Type":"ContainerDied","Data":"b4b232d3808308ce64c298612516c0fbb2bad72e3398b5416091ea205cc3d8e2"}
Nov 24 13:30:38 crc kubenswrapper[5039]: I1124 13:30:38.922242 5039 generic.go:334] "Generic (PLEG): container finished" podID="a6decbe9-edda-413b-b067-665ccf6efece" containerID="5b0e38068a8f2cc4709f6aaa0e8433c63c1c0fabbb4ea4702466f9e1c5c81a57" exitCode=0
Nov 24 13:30:38 crc kubenswrapper[5039]: I1124 13:30:38.922909 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7" event={"ID":"a6decbe9-edda-413b-b067-665ccf6efece","Type":"ContainerDied","Data":"5b0e38068a8f2cc4709f6aaa0e8433c63c1c0fabbb4ea4702466f9e1c5c81a57"}
Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.286765 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9"
Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.319293 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7"
Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.324497 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0883a675-24bc-4d9b-b318-feee05e49135-client-ca\") pod \"0883a675-24bc-4d9b-b318-feee05e49135\" (UID: \"0883a675-24bc-4d9b-b318-feee05e49135\") "
Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.324587 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0883a675-24bc-4d9b-b318-feee05e49135-config\") pod \"0883a675-24bc-4d9b-b318-feee05e49135\" (UID: \"0883a675-24bc-4d9b-b318-feee05e49135\") "
Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.324629 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0883a675-24bc-4d9b-b318-feee05e49135-serving-cert\") pod \"0883a675-24bc-4d9b-b318-feee05e49135\" (UID: \"0883a675-24bc-4d9b-b318-feee05e49135\") "
Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.324695 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0883a675-24bc-4d9b-b318-feee05e49135-proxy-ca-bundles\") pod \"0883a675-24bc-4d9b-b318-feee05e49135\" (UID: \"0883a675-24bc-4d9b-b318-feee05e49135\") "
Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.324718 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6x2n\" (UniqueName: \"kubernetes.io/projected/0883a675-24bc-4d9b-b318-feee05e49135-kube-api-access-z6x2n\") pod \"0883a675-24bc-4d9b-b318-feee05e49135\" (UID: \"0883a675-24bc-4d9b-b318-feee05e49135\") "
Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.325457 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0883a675-24bc-4d9b-b318-feee05e49135-client-ca" (OuterVolumeSpecName: "client-ca") pod "0883a675-24bc-4d9b-b318-feee05e49135" (UID: "0883a675-24bc-4d9b-b318-feee05e49135"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.325474 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0883a675-24bc-4d9b-b318-feee05e49135-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "0883a675-24bc-4d9b-b318-feee05e49135" (UID: "0883a675-24bc-4d9b-b318-feee05e49135"). InnerVolumeSpecName "proxy-ca-bundles".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.325824 5039 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0883a675-24bc-4d9b-b318-feee05e49135-client-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.325844 5039 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0883a675-24bc-4d9b-b318-feee05e49135-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.326130 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0883a675-24bc-4d9b-b318-feee05e49135-config" (OuterVolumeSpecName: "config") pod "0883a675-24bc-4d9b-b318-feee05e49135" (UID: "0883a675-24bc-4d9b-b318-feee05e49135"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.331372 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0883a675-24bc-4d9b-b318-feee05e49135-kube-api-access-z6x2n" (OuterVolumeSpecName: "kube-api-access-z6x2n") pod "0883a675-24bc-4d9b-b318-feee05e49135" (UID: "0883a675-24bc-4d9b-b318-feee05e49135"). InnerVolumeSpecName "kube-api-access-z6x2n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.331938 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0883a675-24bc-4d9b-b318-feee05e49135-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0883a675-24bc-4d9b-b318-feee05e49135" (UID: "0883a675-24bc-4d9b-b318-feee05e49135"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.427066 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2flbf\" (UniqueName: \"kubernetes.io/projected/a6decbe9-edda-413b-b067-665ccf6efece-kube-api-access-2flbf\") pod \"a6decbe9-edda-413b-b067-665ccf6efece\" (UID: \"a6decbe9-edda-413b-b067-665ccf6efece\") " Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.427180 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6decbe9-edda-413b-b067-665ccf6efece-config\") pod \"a6decbe9-edda-413b-b067-665ccf6efece\" (UID: \"a6decbe9-edda-413b-b067-665ccf6efece\") " Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.427218 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a6decbe9-edda-413b-b067-665ccf6efece-serving-cert\") pod \"a6decbe9-edda-413b-b067-665ccf6efece\" (UID: \"a6decbe9-edda-413b-b067-665ccf6efece\") " Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.427277 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a6decbe9-edda-413b-b067-665ccf6efece-client-ca\") pod \"a6decbe9-edda-413b-b067-665ccf6efece\" (UID: \"a6decbe9-edda-413b-b067-665ccf6efece\") " Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.427620 5039 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0883a675-24bc-4d9b-b318-feee05e49135-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.427646 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6x2n\" (UniqueName: \"kubernetes.io/projected/0883a675-24bc-4d9b-b318-feee05e49135-kube-api-access-z6x2n\") on node \"crc\" DevicePath \"\"" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.428152 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6decbe9-edda-413b-b067-665ccf6efece-client-ca" (OuterVolumeSpecName: "client-ca") pod "a6decbe9-edda-413b-b067-665ccf6efece" (UID: "a6decbe9-edda-413b-b067-665ccf6efece"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.427660 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0883a675-24bc-4d9b-b318-feee05e49135-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.428279 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6decbe9-edda-413b-b067-665ccf6efece-config" (OuterVolumeSpecName: "config") pod "a6decbe9-edda-413b-b067-665ccf6efece" (UID: "a6decbe9-edda-413b-b067-665ccf6efece"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.430909 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6decbe9-edda-413b-b067-665ccf6efece-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a6decbe9-edda-413b-b067-665ccf6efece" (UID: "a6decbe9-edda-413b-b067-665ccf6efece"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.431184 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6decbe9-edda-413b-b067-665ccf6efece-kube-api-access-2flbf" (OuterVolumeSpecName: "kube-api-access-2flbf") pod "a6decbe9-edda-413b-b067-665ccf6efece" (UID: "a6decbe9-edda-413b-b067-665ccf6efece"). InnerVolumeSpecName "kube-api-access-2flbf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.529400 5039 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a6decbe9-edda-413b-b067-665ccf6efece-client-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.529443 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2flbf\" (UniqueName: \"kubernetes.io/projected/a6decbe9-edda-413b-b067-665ccf6efece-kube-api-access-2flbf\") on node \"crc\" DevicePath \"\"" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.529456 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6decbe9-edda-413b-b067-665ccf6efece-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.529464 5039 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a6decbe9-edda-413b-b067-665ccf6efece-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.930242 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj" event={"ID":"f025c7ee-097c-4915-9946-41b57f995f0d","Type":"ContainerStarted","Data":"ee3fae458f2bf238dcbdc3ccf9de83c01e13818ca3695a72194acdf7f96ecbd0"} Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.930429 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.930446 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.932365 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7" event={"ID":"a6decbe9-edda-413b-b067-665ccf6efece","Type":"ContainerDied","Data":"0ca39ffe9c5083956a5ed8faa8eb0c056cc2f0a50327923a03d561f4aa22fac1"} Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.932404 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.932451 5039 scope.go:117] "RemoveContainer" containerID="5b0e38068a8f2cc4709f6aaa0e8433c63c1c0fabbb4ea4702466f9e1c5c81a57" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.934134 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" event={"ID":"0883a675-24bc-4d9b-b318-feee05e49135","Type":"ContainerDied","Data":"d4f0f0f81919e0eff3f93e73c806d1fad65adaa1b3d548e5314e6ca872e954b2"} Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.934162 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xgtj9" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.941668 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg" event={"ID":"c24389d6-c229-4c2b-9933-61cd5f9b81d3","Type":"ContainerStarted","Data":"854b6b33e5c4d1f95eedb839c0a1e50e664938736d582efb1f9ced4b7d946a68"} Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.942609 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.942771 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.949574 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.952338 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.953160 5039 scope.go:117] "RemoveContainer" containerID="b4b232d3808308ce64c298612516c0fbb2bad72e3398b5416091ea205cc3d8e2" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.955736 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.968219 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-797bc7dfc5-fjsvj" podStartSLOduration=2.031566944 podStartE2EDuration="7.968186795s" podCreationTimestamp="2025-11-24 13:30:32 +0000 UTC" firstStartedPulling="2025-11-24 13:30:33.157387116 +0000 UTC m=+745.596511616" lastFinishedPulling="2025-11-24 13:30:39.094006967 +0000 UTC m=+751.533131467" observedRunningTime="2025-11-24 13:30:39.964025675 +0000 UTC m=+752.403150175" watchObservedRunningTime="2025-11-24 13:30:39.968186795 +0000 UTC m=+752.407311345" Nov 24 13:30:39 crc kubenswrapper[5039]: I1124 13:30:39.979488 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.067989 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-797bc7dfc5-zd8bg" podStartSLOduration=2.123389654 podStartE2EDuration="8.067967856s" podCreationTimestamp="2025-11-24 13:30:32 +0000 UTC" firstStartedPulling="2025-11-24 13:30:33.147235502 +0000 UTC m=+745.586360002" lastFinishedPulling="2025-11-24 13:30:39.091813704 +0000 UTC m=+751.530938204" observedRunningTime="2025-11-24 13:30:40.059972153 +0000 UTC m=+752.499096663" watchObservedRunningTime="2025-11-24 13:30:40.067967856 +0000 UTC m=+752.507092356" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.088565 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xgtj9"] Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.093716 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xgtj9"] Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.106525 5039 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7"] Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.110082 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lv5c7"] Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.314373 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0883a675-24bc-4d9b-b318-feee05e49135" path="/var/lib/kubelet/pods/0883a675-24bc-4d9b-b318-feee05e49135/volumes" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.315099 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6decbe9-edda-413b-b067-665ccf6efece" path="/var/lib/kubelet/pods/a6decbe9-edda-413b-b067-665ccf6efece/volumes" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.523927 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf"] Nov 24 13:30:40 crc kubenswrapper[5039]: E1124 13:30:40.524184 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6decbe9-edda-413b-b067-665ccf6efece" containerName="route-controller-manager" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.524198 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6decbe9-edda-413b-b067-665ccf6efece" containerName="route-controller-manager" Nov 24 13:30:40 crc kubenswrapper[5039]: E1124 13:30:40.524212 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0883a675-24bc-4d9b-b318-feee05e49135" containerName="controller-manager" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.524220 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0883a675-24bc-4d9b-b318-feee05e49135" containerName="controller-manager" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.524350 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6decbe9-edda-413b-b067-665ccf6efece" containerName="route-controller-manager" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.524370 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0883a675-24bc-4d9b-b318-feee05e49135" containerName="controller-manager" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.525002 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.527639 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.528015 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.528873 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.529211 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.529637 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.529969 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.534172 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-577b4c4b78-95gtv"] Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.535960 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.542214 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.542443 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.542615 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.542738 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.542957 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.543788 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.545449 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf"] Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.558463 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.565026 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-577b4c4b78-95gtv"] Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.646328 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/bc5ae30b-22a9-4d53-b64d-bb98c2774e7a-proxy-ca-bundles\") pod \"controller-manager-577b4c4b78-95gtv\" (UID: \"bc5ae30b-22a9-4d53-b64d-bb98c2774e7a\") " pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.646393 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7j77\" (UniqueName: \"kubernetes.io/projected/bc5ae30b-22a9-4d53-b64d-bb98c2774e7a-kube-api-access-q7j77\") pod \"controller-manager-577b4c4b78-95gtv\" (UID: \"bc5ae30b-22a9-4d53-b64d-bb98c2774e7a\") " pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.646448 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5ae30b-22a9-4d53-b64d-bb98c2774e7a-serving-cert\") pod \"controller-manager-577b4c4b78-95gtv\" (UID: \"bc5ae30b-22a9-4d53-b64d-bb98c2774e7a\") " pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.646487 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8r5bp\" (UniqueName: \"kubernetes.io/projected/e614c6dc-e10f-43e0-9b0e-357d8f8d601d-kube-api-access-8r5bp\") pod \"route-controller-manager-6854746656-9tsqf\" (UID: \"e614c6dc-e10f-43e0-9b0e-357d8f8d601d\") " pod="openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.646616 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e614c6dc-e10f-43e0-9b0e-357d8f8d601d-client-ca\") pod \"route-controller-manager-6854746656-9tsqf\" (UID: \"e614c6dc-e10f-43e0-9b0e-357d8f8d601d\") " pod="openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.646726 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e614c6dc-e10f-43e0-9b0e-357d8f8d601d-config\") pod \"route-controller-manager-6854746656-9tsqf\" (UID: \"e614c6dc-e10f-43e0-9b0e-357d8f8d601d\") " pod="openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.646856 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e614c6dc-e10f-43e0-9b0e-357d8f8d601d-serving-cert\") pod \"route-controller-manager-6854746656-9tsqf\" (UID: \"e614c6dc-e10f-43e0-9b0e-357d8f8d601d\") " pod="openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.646897 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bc5ae30b-22a9-4d53-b64d-bb98c2774e7a-client-ca\") pod \"controller-manager-577b4c4b78-95gtv\" (UID: \"bc5ae30b-22a9-4d53-b64d-bb98c2774e7a\") " pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.646942 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/bc5ae30b-22a9-4d53-b64d-bb98c2774e7a-config\") pod \"controller-manager-577b4c4b78-95gtv\" (UID: \"bc5ae30b-22a9-4d53-b64d-bb98c2774e7a\") " pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.748638 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e614c6dc-e10f-43e0-9b0e-357d8f8d601d-client-ca\") pod \"route-controller-manager-6854746656-9tsqf\" (UID: \"e614c6dc-e10f-43e0-9b0e-357d8f8d601d\") " pod="openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.748763 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e614c6dc-e10f-43e0-9b0e-357d8f8d601d-config\") pod \"route-controller-manager-6854746656-9tsqf\" (UID: \"e614c6dc-e10f-43e0-9b0e-357d8f8d601d\") " pod="openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.748819 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e614c6dc-e10f-43e0-9b0e-357d8f8d601d-serving-cert\") pod \"route-controller-manager-6854746656-9tsqf\" (UID: \"e614c6dc-e10f-43e0-9b0e-357d8f8d601d\") " pod="openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.748858 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bc5ae30b-22a9-4d53-b64d-bb98c2774e7a-client-ca\") pod \"controller-manager-577b4c4b78-95gtv\" (UID: \"bc5ae30b-22a9-4d53-b64d-bb98c2774e7a\") " pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.748903 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc5ae30b-22a9-4d53-b64d-bb98c2774e7a-config\") pod \"controller-manager-577b4c4b78-95gtv\" (UID: \"bc5ae30b-22a9-4d53-b64d-bb98c2774e7a\") " pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.748963 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bc5ae30b-22a9-4d53-b64d-bb98c2774e7a-proxy-ca-bundles\") pod \"controller-manager-577b4c4b78-95gtv\" (UID: \"bc5ae30b-22a9-4d53-b64d-bb98c2774e7a\") " pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.748996 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7j77\" (UniqueName: \"kubernetes.io/projected/bc5ae30b-22a9-4d53-b64d-bb98c2774e7a-kube-api-access-q7j77\") pod \"controller-manager-577b4c4b78-95gtv\" (UID: \"bc5ae30b-22a9-4d53-b64d-bb98c2774e7a\") " pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.749045 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5ae30b-22a9-4d53-b64d-bb98c2774e7a-serving-cert\") pod \"controller-manager-577b4c4b78-95gtv\" (UID: 
\"bc5ae30b-22a9-4d53-b64d-bb98c2774e7a\") " pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.749084 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8r5bp\" (UniqueName: \"kubernetes.io/projected/e614c6dc-e10f-43e0-9b0e-357d8f8d601d-kube-api-access-8r5bp\") pod \"route-controller-manager-6854746656-9tsqf\" (UID: \"e614c6dc-e10f-43e0-9b0e-357d8f8d601d\") " pod="openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.749737 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bc5ae30b-22a9-4d53-b64d-bb98c2774e7a-client-ca\") pod \"controller-manager-577b4c4b78-95gtv\" (UID: \"bc5ae30b-22a9-4d53-b64d-bb98c2774e7a\") " pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.751033 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e614c6dc-e10f-43e0-9b0e-357d8f8d601d-client-ca\") pod \"route-controller-manager-6854746656-9tsqf\" (UID: \"e614c6dc-e10f-43e0-9b0e-357d8f8d601d\") " pod="openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.752820 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e614c6dc-e10f-43e0-9b0e-357d8f8d601d-config\") pod \"route-controller-manager-6854746656-9tsqf\" (UID: \"e614c6dc-e10f-43e0-9b0e-357d8f8d601d\") " pod="openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.752856 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc5ae30b-22a9-4d53-b64d-bb98c2774e7a-config\") pod \"controller-manager-577b4c4b78-95gtv\" (UID: \"bc5ae30b-22a9-4d53-b64d-bb98c2774e7a\") " pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.755083 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bc5ae30b-22a9-4d53-b64d-bb98c2774e7a-proxy-ca-bundles\") pod \"controller-manager-577b4c4b78-95gtv\" (UID: \"bc5ae30b-22a9-4d53-b64d-bb98c2774e7a\") " pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.756364 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e614c6dc-e10f-43e0-9b0e-357d8f8d601d-serving-cert\") pod \"route-controller-manager-6854746656-9tsqf\" (UID: \"e614c6dc-e10f-43e0-9b0e-357d8f8d601d\") " pod="openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.767458 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5ae30b-22a9-4d53-b64d-bb98c2774e7a-serving-cert\") pod \"controller-manager-577b4c4b78-95gtv\" (UID: \"bc5ae30b-22a9-4d53-b64d-bb98c2774e7a\") " pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.797083 5039 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-8r5bp\" (UniqueName: \"kubernetes.io/projected/e614c6dc-e10f-43e0-9b0e-357d8f8d601d-kube-api-access-8r5bp\") pod \"route-controller-manager-6854746656-9tsqf\" (UID: \"e614c6dc-e10f-43e0-9b0e-357d8f8d601d\") " pod="openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.799475 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7j77\" (UniqueName: \"kubernetes.io/projected/bc5ae30b-22a9-4d53-b64d-bb98c2774e7a-kube-api-access-q7j77\") pod \"controller-manager-577b4c4b78-95gtv\" (UID: \"bc5ae30b-22a9-4d53-b64d-bb98c2774e7a\") " pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.841722 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf" Nov 24 13:30:40 crc kubenswrapper[5039]: I1124 13:30:40.852988 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" Nov 24 13:30:41 crc kubenswrapper[5039]: I1124 13:30:41.075354 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf"] Nov 24 13:30:41 crc kubenswrapper[5039]: I1124 13:30:41.311196 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-577b4c4b78-95gtv"] Nov 24 13:30:41 crc kubenswrapper[5039]: I1124 13:30:41.963242 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf" event={"ID":"e614c6dc-e10f-43e0-9b0e-357d8f8d601d","Type":"ContainerStarted","Data":"789da93852f359ab300806ae88cdb4e7412d0942992c35c97b5bc0539623f7e6"} Nov 24 13:30:41 crc kubenswrapper[5039]: I1124 13:30:41.963619 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf" event={"ID":"e614c6dc-e10f-43e0-9b0e-357d8f8d601d","Type":"ContainerStarted","Data":"6fb25894c4ee0c61471b6756e0a9be666cd8cfefe0bce5f71a44b7cd9c4698ad"} Nov 24 13:30:41 crc kubenswrapper[5039]: I1124 13:30:41.964186 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf" Nov 24 13:30:41 crc kubenswrapper[5039]: I1124 13:30:41.966277 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" event={"ID":"bc5ae30b-22a9-4d53-b64d-bb98c2774e7a","Type":"ContainerStarted","Data":"92f886d9cf571dc57d025dbe5a46a24f986c1e2b7c3695c10f8d92dd537c99ef"} Nov 24 13:30:41 crc kubenswrapper[5039]: I1124 13:30:41.966329 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" event={"ID":"bc5ae30b-22a9-4d53-b64d-bb98c2774e7a","Type":"ContainerStarted","Data":"770046a7638b66522e8527d1427a2a4809f42551567c4b760d67830dba990738"} Nov 24 13:30:41 crc kubenswrapper[5039]: I1124 13:30:41.967483 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" Nov 24 13:30:41 crc kubenswrapper[5039]: I1124 13:30:41.971139 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf" Nov 24 13:30:41 crc kubenswrapper[5039]: I1124 13:30:41.974306 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" Nov 24 13:30:41 crc kubenswrapper[5039]: I1124 13:30:41.984334 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6854746656-9tsqf" podStartSLOduration=3.984194762 podStartE2EDuration="3.984194762s" podCreationTimestamp="2025-11-24 13:30:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:30:41.981472416 +0000 UTC m=+754.420596956" watchObservedRunningTime="2025-11-24 13:30:41.984194762 +0000 UTC m=+754.423319262" Nov 24 13:30:42 crc kubenswrapper[5039]: I1124 13:30:42.003852 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-577b4c4b78-95gtv" podStartSLOduration=4.003833694 podStartE2EDuration="4.003833694s" podCreationTimestamp="2025-11-24 13:30:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:30:41.998787323 +0000 UTC m=+754.437911853" watchObservedRunningTime="2025-11-24 13:30:42.003833694 +0000 UTC m=+754.442958204" Nov 24 13:30:45 crc kubenswrapper[5039]: I1124 13:30:45.583132 5039 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 24 13:30:50 crc kubenswrapper[5039]: I1124 13:30:50.101148 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:30:50 crc kubenswrapper[5039]: I1124 13:30:50.101572 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:30:50 crc kubenswrapper[5039]: I1124 13:30:50.101646 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:30:50 crc kubenswrapper[5039]: I1124 13:30:50.102451 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ad5ffc63035c78c438991177870b3e0e28e428524aad180cebafc49a63fbdb72"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 13:30:50 crc kubenswrapper[5039]: I1124 13:30:50.102592 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://ad5ffc63035c78c438991177870b3e0e28e428524aad180cebafc49a63fbdb72" gracePeriod=600 Nov 24 13:30:51 crc kubenswrapper[5039]: I1124 13:30:51.021495 5039 generic.go:334] 
"Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="ad5ffc63035c78c438991177870b3e0e28e428524aad180cebafc49a63fbdb72" exitCode=0 Nov 24 13:30:51 crc kubenswrapper[5039]: I1124 13:30:51.021584 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"ad5ffc63035c78c438991177870b3e0e28e428524aad180cebafc49a63fbdb72"} Nov 24 13:30:51 crc kubenswrapper[5039]: I1124 13:30:51.021873 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"066219b28d99610e4d1092b8b5a95d47b8b9f6102be58f3694f6d12e791d5f0b"} Nov 24 13:30:51 crc kubenswrapper[5039]: I1124 13:30:51.021893 5039 scope.go:117] "RemoveContainer" containerID="d1dbbf4f80166d075183d0109aa48d3369a50b433b63d157f67b87eea163a9c1" Nov 24 13:30:52 crc kubenswrapper[5039]: I1124 13:30:52.415743 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-distributor-76cc67bf56-xwwg7" Nov 24 13:30:52 crc kubenswrapper[5039]: I1124 13:30:52.520736 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-querier-5895d59bb8-4xrbf" Nov 24 13:30:52 crc kubenswrapper[5039]: I1124 13:30:52.587963 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-qvz9s" Nov 24 13:30:53 crc kubenswrapper[5039]: I1124 13:30:53.602572 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-compactor-0" Nov 24 13:30:53 crc kubenswrapper[5039]: I1124 13:30:53.662375 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-index-gateway-0" Nov 24 13:30:53 crc kubenswrapper[5039]: I1124 13:30:53.870138 5039 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens Nov 24 13:30:53 crc kubenswrapper[5039]: I1124 13:30:53.870199 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="ffdab614-73c1-4ac9-adba-d2ec7ce81550" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 24 13:30:54 crc kubenswrapper[5039]: I1124 13:30:54.183384 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dnvxv"] Nov 24 13:30:54 crc kubenswrapper[5039]: I1124 13:30:54.185401 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dnvxv" Nov 24 13:30:54 crc kubenswrapper[5039]: I1124 13:30:54.196295 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dnvxv"] Nov 24 13:30:54 crc kubenswrapper[5039]: I1124 13:30:54.244889 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/742ddebe-c68e-420d-af73-9e853cc648af-catalog-content\") pod \"redhat-marketplace-dnvxv\" (UID: \"742ddebe-c68e-420d-af73-9e853cc648af\") " pod="openshift-marketplace/redhat-marketplace-dnvxv" Nov 24 13:30:54 crc kubenswrapper[5039]: I1124 13:30:54.244968 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/742ddebe-c68e-420d-af73-9e853cc648af-utilities\") pod \"redhat-marketplace-dnvxv\" (UID: \"742ddebe-c68e-420d-af73-9e853cc648af\") " pod="openshift-marketplace/redhat-marketplace-dnvxv" Nov 24 13:30:54 crc kubenswrapper[5039]: I1124 13:30:54.244994 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7tvw\" (UniqueName: \"kubernetes.io/projected/742ddebe-c68e-420d-af73-9e853cc648af-kube-api-access-l7tvw\") pod \"redhat-marketplace-dnvxv\" (UID: \"742ddebe-c68e-420d-af73-9e853cc648af\") " pod="openshift-marketplace/redhat-marketplace-dnvxv" Nov 24 13:30:54 crc kubenswrapper[5039]: I1124 13:30:54.346184 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/742ddebe-c68e-420d-af73-9e853cc648af-catalog-content\") pod \"redhat-marketplace-dnvxv\" (UID: \"742ddebe-c68e-420d-af73-9e853cc648af\") " pod="openshift-marketplace/redhat-marketplace-dnvxv" Nov 24 13:30:54 crc kubenswrapper[5039]: I1124 13:30:54.346242 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/742ddebe-c68e-420d-af73-9e853cc648af-utilities\") pod \"redhat-marketplace-dnvxv\" (UID: \"742ddebe-c68e-420d-af73-9e853cc648af\") " pod="openshift-marketplace/redhat-marketplace-dnvxv" Nov 24 13:30:54 crc kubenswrapper[5039]: I1124 13:30:54.346270 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7tvw\" (UniqueName: \"kubernetes.io/projected/742ddebe-c68e-420d-af73-9e853cc648af-kube-api-access-l7tvw\") pod \"redhat-marketplace-dnvxv\" (UID: \"742ddebe-c68e-420d-af73-9e853cc648af\") " pod="openshift-marketplace/redhat-marketplace-dnvxv" Nov 24 13:30:54 crc kubenswrapper[5039]: I1124 13:30:54.346775 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/742ddebe-c68e-420d-af73-9e853cc648af-catalog-content\") pod \"redhat-marketplace-dnvxv\" (UID: \"742ddebe-c68e-420d-af73-9e853cc648af\") " pod="openshift-marketplace/redhat-marketplace-dnvxv" Nov 24 13:30:54 crc kubenswrapper[5039]: I1124 13:30:54.346832 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/742ddebe-c68e-420d-af73-9e853cc648af-utilities\") pod \"redhat-marketplace-dnvxv\" (UID: \"742ddebe-c68e-420d-af73-9e853cc648af\") " pod="openshift-marketplace/redhat-marketplace-dnvxv" Nov 24 13:30:54 crc kubenswrapper[5039]: I1124 13:30:54.369538 5039 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-l7tvw\" (UniqueName: \"kubernetes.io/projected/742ddebe-c68e-420d-af73-9e853cc648af-kube-api-access-l7tvw\") pod \"redhat-marketplace-dnvxv\" (UID: \"742ddebe-c68e-420d-af73-9e853cc648af\") " pod="openshift-marketplace/redhat-marketplace-dnvxv" Nov 24 13:30:54 crc kubenswrapper[5039]: I1124 13:30:54.510776 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dnvxv" Nov 24 13:30:55 crc kubenswrapper[5039]: I1124 13:30:55.004427 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dnvxv"] Nov 24 13:30:55 crc kubenswrapper[5039]: W1124 13:30:55.016448 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod742ddebe_c68e_420d_af73_9e853cc648af.slice/crio-71b775eaa86c5fe2469246363c325078e7a34abda9deb77c3616015cf0fe49f2 WatchSource:0}: Error finding container 71b775eaa86c5fe2469246363c325078e7a34abda9deb77c3616015cf0fe49f2: Status 404 returned error can't find the container with id 71b775eaa86c5fe2469246363c325078e7a34abda9deb77c3616015cf0fe49f2 Nov 24 13:30:55 crc kubenswrapper[5039]: I1124 13:30:55.051147 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dnvxv" event={"ID":"742ddebe-c68e-420d-af73-9e853cc648af","Type":"ContainerStarted","Data":"71b775eaa86c5fe2469246363c325078e7a34abda9deb77c3616015cf0fe49f2"} Nov 24 13:30:56 crc kubenswrapper[5039]: I1124 13:30:56.060978 5039 generic.go:334] "Generic (PLEG): container finished" podID="742ddebe-c68e-420d-af73-9e853cc648af" containerID="b0b51cecbac1e00ff2cc71aebf917d91b8b5d668c2fb1db3c3de2af0886f913c" exitCode=0 Nov 24 13:30:56 crc kubenswrapper[5039]: I1124 13:30:56.061037 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dnvxv" event={"ID":"742ddebe-c68e-420d-af73-9e853cc648af","Type":"ContainerDied","Data":"b0b51cecbac1e00ff2cc71aebf917d91b8b5d668c2fb1db3c3de2af0886f913c"} Nov 24 13:30:57 crc kubenswrapper[5039]: I1124 13:30:57.081036 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dnvxv" event={"ID":"742ddebe-c68e-420d-af73-9e853cc648af","Type":"ContainerStarted","Data":"ed97784e9aea7c1c27843334b9983729bd41e3d9c5b41ec576a4891bbaffad73"} Nov 24 13:30:58 crc kubenswrapper[5039]: I1124 13:30:58.090868 5039 generic.go:334] "Generic (PLEG): container finished" podID="742ddebe-c68e-420d-af73-9e853cc648af" containerID="ed97784e9aea7c1c27843334b9983729bd41e3d9c5b41ec576a4891bbaffad73" exitCode=0 Nov 24 13:30:58 crc kubenswrapper[5039]: I1124 13:30:58.091275 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dnvxv" event={"ID":"742ddebe-c68e-420d-af73-9e853cc648af","Type":"ContainerDied","Data":"ed97784e9aea7c1c27843334b9983729bd41e3d9c5b41ec576a4891bbaffad73"} Nov 24 13:30:58 crc kubenswrapper[5039]: I1124 13:30:58.091314 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dnvxv" event={"ID":"742ddebe-c68e-420d-af73-9e853cc648af","Type":"ContainerStarted","Data":"2dff1dd3d518b73dcc5aaaab62cf31d13325cb857894f7e7ea9813cf79f85dce"} Nov 24 13:30:58 crc kubenswrapper[5039]: I1124 13:30:58.118206 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dnvxv" podStartSLOduration=2.5786345920000002 
podStartE2EDuration="4.118179683s" podCreationTimestamp="2025-11-24 13:30:54 +0000 UTC" firstStartedPulling="2025-11-24 13:30:56.062781407 +0000 UTC m=+768.501905907" lastFinishedPulling="2025-11-24 13:30:57.602326458 +0000 UTC m=+770.041450998" observedRunningTime="2025-11-24 13:30:58.115119828 +0000 UTC m=+770.554244388" watchObservedRunningTime="2025-11-24 13:30:58.118179683 +0000 UTC m=+770.557304213"
Nov 24 13:31:00 crc kubenswrapper[5039]: I1124 13:31:00.561767 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-km927"]
Nov 24 13:31:00 crc kubenswrapper[5039]: I1124 13:31:00.564067 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-km927"
Nov 24 13:31:00 crc kubenswrapper[5039]: I1124 13:31:00.583046 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-km927"]
Nov 24 13:31:00 crc kubenswrapper[5039]: I1124 13:31:00.759147 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aee8c427-ee32-4093-8dca-4a5080d2e880-utilities\") pod \"certified-operators-km927\" (UID: \"aee8c427-ee32-4093-8dca-4a5080d2e880\") " pod="openshift-marketplace/certified-operators-km927"
Nov 24 13:31:00 crc kubenswrapper[5039]: I1124 13:31:00.759258 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxtzj\" (UniqueName: \"kubernetes.io/projected/aee8c427-ee32-4093-8dca-4a5080d2e880-kube-api-access-zxtzj\") pod \"certified-operators-km927\" (UID: \"aee8c427-ee32-4093-8dca-4a5080d2e880\") " pod="openshift-marketplace/certified-operators-km927"
Nov 24 13:31:00 crc kubenswrapper[5039]: I1124 13:31:00.759331 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aee8c427-ee32-4093-8dca-4a5080d2e880-catalog-content\") pod \"certified-operators-km927\" (UID: \"aee8c427-ee32-4093-8dca-4a5080d2e880\") " pod="openshift-marketplace/certified-operators-km927"
Nov 24 13:31:00 crc kubenswrapper[5039]: I1124 13:31:00.860782 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aee8c427-ee32-4093-8dca-4a5080d2e880-catalog-content\") pod \"certified-operators-km927\" (UID: \"aee8c427-ee32-4093-8dca-4a5080d2e880\") " pod="openshift-marketplace/certified-operators-km927"
Nov 24 13:31:00 crc kubenswrapper[5039]: I1124 13:31:00.860865 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aee8c427-ee32-4093-8dca-4a5080d2e880-utilities\") pod \"certified-operators-km927\" (UID: \"aee8c427-ee32-4093-8dca-4a5080d2e880\") " pod="openshift-marketplace/certified-operators-km927"
Nov 24 13:31:00 crc kubenswrapper[5039]: I1124 13:31:00.860925 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxtzj\" (UniqueName: \"kubernetes.io/projected/aee8c427-ee32-4093-8dca-4a5080d2e880-kube-api-access-zxtzj\") pod \"certified-operators-km927\" (UID: \"aee8c427-ee32-4093-8dca-4a5080d2e880\") " pod="openshift-marketplace/certified-operators-km927"
Nov 24 13:31:00 crc kubenswrapper[5039]: I1124 13:31:00.861289 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aee8c427-ee32-4093-8dca-4a5080d2e880-catalog-content\") pod \"certified-operators-km927\" (UID: \"aee8c427-ee32-4093-8dca-4a5080d2e880\") " pod="openshift-marketplace/certified-operators-km927"
Nov 24 13:31:00 crc kubenswrapper[5039]: I1124 13:31:00.861373 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aee8c427-ee32-4093-8dca-4a5080d2e880-utilities\") pod \"certified-operators-km927\" (UID: \"aee8c427-ee32-4093-8dca-4a5080d2e880\") " pod="openshift-marketplace/certified-operators-km927"
Nov 24 13:31:00 crc kubenswrapper[5039]: I1124 13:31:00.890659 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxtzj\" (UniqueName: \"kubernetes.io/projected/aee8c427-ee32-4093-8dca-4a5080d2e880-kube-api-access-zxtzj\") pod \"certified-operators-km927\" (UID: \"aee8c427-ee32-4093-8dca-4a5080d2e880\") " pod="openshift-marketplace/certified-operators-km927"
Nov 24 13:31:00 crc kubenswrapper[5039]: I1124 13:31:00.900974 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-km927"
Nov 24 13:31:01 crc kubenswrapper[5039]: I1124 13:31:01.411370 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-km927"]
Nov 24 13:31:02 crc kubenswrapper[5039]: I1124 13:31:02.120898 5039 generic.go:334] "Generic (PLEG): container finished" podID="aee8c427-ee32-4093-8dca-4a5080d2e880" containerID="fa359c05c4c92656b4df434dc7c8a98524fe7bc5063b03c1cbaafdddb83f7a2c" exitCode=0
Nov 24 13:31:02 crc kubenswrapper[5039]: I1124 13:31:02.120963 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-km927" event={"ID":"aee8c427-ee32-4093-8dca-4a5080d2e880","Type":"ContainerDied","Data":"fa359c05c4c92656b4df434dc7c8a98524fe7bc5063b03c1cbaafdddb83f7a2c"}
Nov 24 13:31:02 crc kubenswrapper[5039]: I1124 13:31:02.121001 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-km927" event={"ID":"aee8c427-ee32-4093-8dca-4a5080d2e880","Type":"ContainerStarted","Data":"01e56228101231d69abd27b12bc990336dd2d45061e2f7db46e3c387b1fda3fe"}
Nov 24 13:31:03 crc kubenswrapper[5039]: I1124 13:31:03.863482 5039 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens
Nov 24 13:31:03 crc kubenswrapper[5039]: I1124 13:31:03.863865 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="ffdab614-73c1-4ac9-adba-d2ec7ce81550" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Nov 24 13:31:04 crc kubenswrapper[5039]: I1124 13:31:04.133682 5039 generic.go:334] "Generic (PLEG): container finished" podID="aee8c427-ee32-4093-8dca-4a5080d2e880" containerID="c16c60fcd3290330fe0473f541b2708e1a3476c4ce084cc2446b9d556ef0c4fc" exitCode=0
Nov 24 13:31:04 crc kubenswrapper[5039]: I1124 13:31:04.133779 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-km927" event={"ID":"aee8c427-ee32-4093-8dca-4a5080d2e880","Type":"ContainerDied","Data":"c16c60fcd3290330fe0473f541b2708e1a3476c4ce084cc2446b9d556ef0c4fc"}
Nov 24 13:31:04 crc kubenswrapper[5039]: I1124 13:31:04.511993 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dnvxv"
Nov 24 13:31:04 crc kubenswrapper[5039]: I1124 13:31:04.512082 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dnvxv"
Nov 24 13:31:04 crc kubenswrapper[5039]: I1124 13:31:04.554794 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dnvxv"
Nov 24 13:31:05 crc kubenswrapper[5039]: I1124 13:31:05.142797 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-km927" event={"ID":"aee8c427-ee32-4093-8dca-4a5080d2e880","Type":"ContainerStarted","Data":"86afc39345f61e5b427d7ceb8cba8dc9710738087050c658c9fd21e17361fa19"}
Nov 24 13:31:05 crc kubenswrapper[5039]: I1124 13:31:05.169936 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-km927" podStartSLOduration=2.699921526 podStartE2EDuration="5.169901489s" podCreationTimestamp="2025-11-24 13:31:00 +0000 UTC" firstStartedPulling="2025-11-24 13:31:02.123840632 +0000 UTC m=+774.562965182" lastFinishedPulling="2025-11-24 13:31:04.593820625 +0000 UTC m=+777.032945145" observedRunningTime="2025-11-24 13:31:05.16082133 +0000 UTC m=+777.599945840" watchObservedRunningTime="2025-11-24 13:31:05.169901489 +0000 UTC m=+777.609026029"
Nov 24 13:31:05 crc kubenswrapper[5039]: I1124 13:31:05.188815 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dnvxv"
Nov 24 13:31:06 crc kubenswrapper[5039]: I1124 13:31:06.150037 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dnvxv"]
Nov 24 13:31:07 crc kubenswrapper[5039]: I1124 13:31:07.158399 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dnvxv" podUID="742ddebe-c68e-420d-af73-9e853cc648af" containerName="registry-server" containerID="cri-o://2dff1dd3d518b73dcc5aaaab62cf31d13325cb857894f7e7ea9813cf79f85dce" gracePeriod=2
Nov 24 13:31:07 crc kubenswrapper[5039]: I1124 13:31:07.778630 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dnvxv"
Nov 24 13:31:07 crc kubenswrapper[5039]: I1124 13:31:07.869305 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/742ddebe-c68e-420d-af73-9e853cc648af-catalog-content\") pod \"742ddebe-c68e-420d-af73-9e853cc648af\" (UID: \"742ddebe-c68e-420d-af73-9e853cc648af\") "
Nov 24 13:31:07 crc kubenswrapper[5039]: I1124 13:31:07.869413 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l7tvw\" (UniqueName: \"kubernetes.io/projected/742ddebe-c68e-420d-af73-9e853cc648af-kube-api-access-l7tvw\") pod \"742ddebe-c68e-420d-af73-9e853cc648af\" (UID: \"742ddebe-c68e-420d-af73-9e853cc648af\") "
Nov 24 13:31:07 crc kubenswrapper[5039]: I1124 13:31:07.869588 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/742ddebe-c68e-420d-af73-9e853cc648af-utilities\") pod \"742ddebe-c68e-420d-af73-9e853cc648af\" (UID: \"742ddebe-c68e-420d-af73-9e853cc648af\") "
Nov 24 13:31:07 crc kubenswrapper[5039]: I1124 13:31:07.870415 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/742ddebe-c68e-420d-af73-9e853cc648af-utilities" (OuterVolumeSpecName: "utilities") pod "742ddebe-c68e-420d-af73-9e853cc648af" (UID: "742ddebe-c68e-420d-af73-9e853cc648af"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:31:07 crc kubenswrapper[5039]: I1124 13:31:07.879105 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/742ddebe-c68e-420d-af73-9e853cc648af-kube-api-access-l7tvw" (OuterVolumeSpecName: "kube-api-access-l7tvw") pod "742ddebe-c68e-420d-af73-9e853cc648af" (UID: "742ddebe-c68e-420d-af73-9e853cc648af"). InnerVolumeSpecName "kube-api-access-l7tvw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:31:07 crc kubenswrapper[5039]: I1124 13:31:07.887470 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/742ddebe-c68e-420d-af73-9e853cc648af-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "742ddebe-c68e-420d-af73-9e853cc648af" (UID: "742ddebe-c68e-420d-af73-9e853cc648af"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:31:07 crc kubenswrapper[5039]: I1124 13:31:07.971467 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l7tvw\" (UniqueName: \"kubernetes.io/projected/742ddebe-c68e-420d-af73-9e853cc648af-kube-api-access-l7tvw\") on node \"crc\" DevicePath \"\""
Nov 24 13:31:07 crc kubenswrapper[5039]: I1124 13:31:07.971573 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/742ddebe-c68e-420d-af73-9e853cc648af-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 13:31:07 crc kubenswrapper[5039]: I1124 13:31:07.971597 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/742ddebe-c68e-420d-af73-9e853cc648af-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 13:31:08 crc kubenswrapper[5039]: I1124 13:31:08.166876 5039 generic.go:334] "Generic (PLEG): container finished" podID="742ddebe-c68e-420d-af73-9e853cc648af" containerID="2dff1dd3d518b73dcc5aaaab62cf31d13325cb857894f7e7ea9813cf79f85dce" exitCode=0
Nov 24 13:31:08 crc kubenswrapper[5039]: I1124 13:31:08.166917 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dnvxv" event={"ID":"742ddebe-c68e-420d-af73-9e853cc648af","Type":"ContainerDied","Data":"2dff1dd3d518b73dcc5aaaab62cf31d13325cb857894f7e7ea9813cf79f85dce"}
Nov 24 13:31:08 crc kubenswrapper[5039]: I1124 13:31:08.166945 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dnvxv" event={"ID":"742ddebe-c68e-420d-af73-9e853cc648af","Type":"ContainerDied","Data":"71b775eaa86c5fe2469246363c325078e7a34abda9deb77c3616015cf0fe49f2"}
Nov 24 13:31:08 crc kubenswrapper[5039]: I1124 13:31:08.166955 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dnvxv"
Nov 24 13:31:08 crc kubenswrapper[5039]: I1124 13:31:08.166963 5039 scope.go:117] "RemoveContainer" containerID="2dff1dd3d518b73dcc5aaaab62cf31d13325cb857894f7e7ea9813cf79f85dce"
Nov 24 13:31:08 crc kubenswrapper[5039]: I1124 13:31:08.186020 5039 scope.go:117] "RemoveContainer" containerID="ed97784e9aea7c1c27843334b9983729bd41e3d9c5b41ec576a4891bbaffad73"
Nov 24 13:31:08 crc kubenswrapper[5039]: I1124 13:31:08.197560 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dnvxv"]
Nov 24 13:31:08 crc kubenswrapper[5039]: I1124 13:31:08.201271 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dnvxv"]
Nov 24 13:31:08 crc kubenswrapper[5039]: I1124 13:31:08.227047 5039 scope.go:117] "RemoveContainer" containerID="b0b51cecbac1e00ff2cc71aebf917d91b8b5d668c2fb1db3c3de2af0886f913c"
Nov 24 13:31:08 crc kubenswrapper[5039]: I1124 13:31:08.242175 5039 scope.go:117] "RemoveContainer" containerID="2dff1dd3d518b73dcc5aaaab62cf31d13325cb857894f7e7ea9813cf79f85dce"
Nov 24 13:31:08 crc kubenswrapper[5039]: E1124 13:31:08.242547 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2dff1dd3d518b73dcc5aaaab62cf31d13325cb857894f7e7ea9813cf79f85dce\": container with ID starting with 2dff1dd3d518b73dcc5aaaab62cf31d13325cb857894f7e7ea9813cf79f85dce not found: ID does not exist" containerID="2dff1dd3d518b73dcc5aaaab62cf31d13325cb857894f7e7ea9813cf79f85dce"
Nov 24 13:31:08 crc kubenswrapper[5039]: I1124 13:31:08.242576 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2dff1dd3d518b73dcc5aaaab62cf31d13325cb857894f7e7ea9813cf79f85dce"} err="failed to get container status \"2dff1dd3d518b73dcc5aaaab62cf31d13325cb857894f7e7ea9813cf79f85dce\": rpc error: code = NotFound desc = could not find container \"2dff1dd3d518b73dcc5aaaab62cf31d13325cb857894f7e7ea9813cf79f85dce\": container with ID starting with 2dff1dd3d518b73dcc5aaaab62cf31d13325cb857894f7e7ea9813cf79f85dce not found: ID does not exist"
Nov 24 13:31:08 crc kubenswrapper[5039]: I1124 13:31:08.242594 5039 scope.go:117] "RemoveContainer" containerID="ed97784e9aea7c1c27843334b9983729bd41e3d9c5b41ec576a4891bbaffad73"
Nov 24 13:31:08 crc kubenswrapper[5039]: E1124 13:31:08.242909 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed97784e9aea7c1c27843334b9983729bd41e3d9c5b41ec576a4891bbaffad73\": container with ID starting with ed97784e9aea7c1c27843334b9983729bd41e3d9c5b41ec576a4891bbaffad73 not found: ID does not exist" containerID="ed97784e9aea7c1c27843334b9983729bd41e3d9c5b41ec576a4891bbaffad73"
Nov 24 13:31:08 crc kubenswrapper[5039]: I1124 13:31:08.242931 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed97784e9aea7c1c27843334b9983729bd41e3d9c5b41ec576a4891bbaffad73"} err="failed to get container status \"ed97784e9aea7c1c27843334b9983729bd41e3d9c5b41ec576a4891bbaffad73\": rpc error: code = NotFound desc = could not find container \"ed97784e9aea7c1c27843334b9983729bd41e3d9c5b41ec576a4891bbaffad73\": container with ID starting with ed97784e9aea7c1c27843334b9983729bd41e3d9c5b41ec576a4891bbaffad73 not found: ID does not exist"
Nov 24 13:31:08 crc kubenswrapper[5039]: I1124 13:31:08.242977 5039 scope.go:117] "RemoveContainer" containerID="b0b51cecbac1e00ff2cc71aebf917d91b8b5d668c2fb1db3c3de2af0886f913c"
Nov 24 13:31:08 crc kubenswrapper[5039]: E1124 13:31:08.243144 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0b51cecbac1e00ff2cc71aebf917d91b8b5d668c2fb1db3c3de2af0886f913c\": container with ID starting with b0b51cecbac1e00ff2cc71aebf917d91b8b5d668c2fb1db3c3de2af0886f913c not found: ID does not exist" containerID="b0b51cecbac1e00ff2cc71aebf917d91b8b5d668c2fb1db3c3de2af0886f913c"
Nov 24 13:31:08 crc kubenswrapper[5039]: I1124 13:31:08.243163 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0b51cecbac1e00ff2cc71aebf917d91b8b5d668c2fb1db3c3de2af0886f913c"} err="failed to get container status \"b0b51cecbac1e00ff2cc71aebf917d91b8b5d668c2fb1db3c3de2af0886f913c\": rpc error: code = NotFound desc = could not find container \"b0b51cecbac1e00ff2cc71aebf917d91b8b5d668c2fb1db3c3de2af0886f913c\": container with ID starting with b0b51cecbac1e00ff2cc71aebf917d91b8b5d668c2fb1db3c3de2af0886f913c not found: ID does not exist"
Nov 24 13:31:08 crc kubenswrapper[5039]: I1124 13:31:08.315545 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="742ddebe-c68e-420d-af73-9e853cc648af" path="/var/lib/kubelet/pods/742ddebe-c68e-420d-af73-9e853cc648af/volumes"
Nov 24 13:31:09 crc kubenswrapper[5039]: I1124 13:31:09.566053 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zl8gv"]
Nov 24 13:31:09 crc kubenswrapper[5039]: E1124 13:31:09.566731 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="742ddebe-c68e-420d-af73-9e853cc648af" containerName="extract-utilities"
Nov 24 13:31:09 crc kubenswrapper[5039]: I1124 13:31:09.566750 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="742ddebe-c68e-420d-af73-9e853cc648af" containerName="extract-utilities"
Nov 24 13:31:09 crc kubenswrapper[5039]: E1124 13:31:09.566768 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="742ddebe-c68e-420d-af73-9e853cc648af" containerName="extract-content"
Nov 24 13:31:09 crc kubenswrapper[5039]: I1124 13:31:09.566779 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="742ddebe-c68e-420d-af73-9e853cc648af" containerName="extract-content"
Nov 24 13:31:09 crc kubenswrapper[5039]: E1124 13:31:09.566804 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="742ddebe-c68e-420d-af73-9e853cc648af" containerName="registry-server"
Nov 24 13:31:09 crc kubenswrapper[5039]: I1124 13:31:09.566814 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="742ddebe-c68e-420d-af73-9e853cc648af" containerName="registry-server"
Nov 24 13:31:09 crc kubenswrapper[5039]: I1124 13:31:09.567001 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="742ddebe-c68e-420d-af73-9e853cc648af" containerName="registry-server"
Nov 24 13:31:09 crc kubenswrapper[5039]: I1124 13:31:09.568660 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zl8gv"
Nov 24 13:31:09 crc kubenswrapper[5039]: I1124 13:31:09.583369 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zl8gv"]
Nov 24 13:31:09 crc kubenswrapper[5039]: I1124 13:31:09.592892 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1abeea9-494a-4062-95a5-373ffaf1e3e9-catalog-content\") pod \"redhat-operators-zl8gv\" (UID: \"e1abeea9-494a-4062-95a5-373ffaf1e3e9\") " pod="openshift-marketplace/redhat-operators-zl8gv"
Nov 24 13:31:09 crc kubenswrapper[5039]: I1124 13:31:09.592937 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1abeea9-494a-4062-95a5-373ffaf1e3e9-utilities\") pod \"redhat-operators-zl8gv\" (UID: \"e1abeea9-494a-4062-95a5-373ffaf1e3e9\") " pod="openshift-marketplace/redhat-operators-zl8gv"
Nov 24 13:31:09 crc kubenswrapper[5039]: I1124 13:31:09.592968 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5j8kp\" (UniqueName: \"kubernetes.io/projected/e1abeea9-494a-4062-95a5-373ffaf1e3e9-kube-api-access-5j8kp\") pod \"redhat-operators-zl8gv\" (UID: \"e1abeea9-494a-4062-95a5-373ffaf1e3e9\") " pod="openshift-marketplace/redhat-operators-zl8gv"
Nov 24 13:31:09 crc kubenswrapper[5039]: I1124 13:31:09.694787 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1abeea9-494a-4062-95a5-373ffaf1e3e9-catalog-content\") pod \"redhat-operators-zl8gv\" (UID: \"e1abeea9-494a-4062-95a5-373ffaf1e3e9\") " pod="openshift-marketplace/redhat-operators-zl8gv"
Nov 24 13:31:09 crc kubenswrapper[5039]: I1124 13:31:09.694846 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1abeea9-494a-4062-95a5-373ffaf1e3e9-utilities\") pod \"redhat-operators-zl8gv\" (UID: \"e1abeea9-494a-4062-95a5-373ffaf1e3e9\") " pod="openshift-marketplace/redhat-operators-zl8gv"
Nov 24 13:31:09 crc kubenswrapper[5039]: I1124 13:31:09.694871 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5j8kp\" (UniqueName: \"kubernetes.io/projected/e1abeea9-494a-4062-95a5-373ffaf1e3e9-kube-api-access-5j8kp\") pod \"redhat-operators-zl8gv\" (UID: \"e1abeea9-494a-4062-95a5-373ffaf1e3e9\") " pod="openshift-marketplace/redhat-operators-zl8gv"
Nov 24 13:31:09 crc kubenswrapper[5039]: I1124 13:31:09.695410 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1abeea9-494a-4062-95a5-373ffaf1e3e9-catalog-content\") pod \"redhat-operators-zl8gv\" (UID: \"e1abeea9-494a-4062-95a5-373ffaf1e3e9\") " pod="openshift-marketplace/redhat-operators-zl8gv"
Nov 24 13:31:09 crc kubenswrapper[5039]: I1124 13:31:09.695454 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1abeea9-494a-4062-95a5-373ffaf1e3e9-utilities\") pod \"redhat-operators-zl8gv\" (UID: \"e1abeea9-494a-4062-95a5-373ffaf1e3e9\") " pod="openshift-marketplace/redhat-operators-zl8gv"
Nov 24 13:31:09 crc kubenswrapper[5039]: I1124 13:31:09.714417 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5j8kp\" (UniqueName: \"kubernetes.io/projected/e1abeea9-494a-4062-95a5-373ffaf1e3e9-kube-api-access-5j8kp\") pod \"redhat-operators-zl8gv\" (UID: \"e1abeea9-494a-4062-95a5-373ffaf1e3e9\") " pod="openshift-marketplace/redhat-operators-zl8gv"
Nov 24 13:31:09 crc kubenswrapper[5039]: I1124 13:31:09.941539 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zl8gv"
Nov 24 13:31:10 crc kubenswrapper[5039]: I1124 13:31:10.360169 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zl8gv"]
Nov 24 13:31:10 crc kubenswrapper[5039]: I1124 13:31:10.901143 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-km927"
Nov 24 13:31:10 crc kubenswrapper[5039]: I1124 13:31:10.901200 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-km927"
Nov 24 13:31:10 crc kubenswrapper[5039]: I1124 13:31:10.941645 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-km927"
Nov 24 13:31:11 crc kubenswrapper[5039]: I1124 13:31:11.189272 5039 generic.go:334] "Generic (PLEG): container finished" podID="e1abeea9-494a-4062-95a5-373ffaf1e3e9" containerID="a1931f8c1a824d6bc3d9f373eba62cd26de7ba60c309612bb23b432fda4cd867" exitCode=0
Nov 24 13:31:11 crc kubenswrapper[5039]: I1124 13:31:11.189381 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zl8gv" event={"ID":"e1abeea9-494a-4062-95a5-373ffaf1e3e9","Type":"ContainerDied","Data":"a1931f8c1a824d6bc3d9f373eba62cd26de7ba60c309612bb23b432fda4cd867"}
Nov 24 13:31:11 crc kubenswrapper[5039]: I1124 13:31:11.189425 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zl8gv" event={"ID":"e1abeea9-494a-4062-95a5-373ffaf1e3e9","Type":"ContainerStarted","Data":"ab24bdfabb2b09edc238aefe047f139b5a6e0872efbf5a7beefd715015734b8d"}
Nov 24 13:31:11 crc kubenswrapper[5039]: I1124 13:31:11.273313 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-km927"
Nov 24 13:31:13 crc kubenswrapper[5039]: I1124 13:31:13.206698 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zl8gv" event={"ID":"e1abeea9-494a-4062-95a5-373ffaf1e3e9","Type":"ContainerStarted","Data":"668a5cbd7b10a541385514ab8acf9363a8a1d278888cb9a34154e6c49c43e085"}
Nov 24 13:31:13 crc kubenswrapper[5039]: I1124 13:31:13.862349 5039 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready
Nov 24 13:31:13 crc kubenswrapper[5039]: I1124 13:31:13.862444 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="ffdab614-73c1-4ac9-adba-d2ec7ce81550" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Nov 24 13:31:13 crc kubenswrapper[5039]: I1124 13:31:13.952795 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-km927"]
Nov 24 13:31:13 crc kubenswrapper[5039]: I1124 13:31:13.953197 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-km927" podUID="aee8c427-ee32-4093-8dca-4a5080d2e880" containerName="registry-server" containerID="cri-o://86afc39345f61e5b427d7ceb8cba8dc9710738087050c658c9fd21e17361fa19" gracePeriod=2
Nov 24 13:31:14 crc kubenswrapper[5039]: I1124 13:31:14.216292 5039 generic.go:334] "Generic (PLEG): container finished" podID="e1abeea9-494a-4062-95a5-373ffaf1e3e9" containerID="668a5cbd7b10a541385514ab8acf9363a8a1d278888cb9a34154e6c49c43e085" exitCode=0
Nov 24 13:31:14 crc kubenswrapper[5039]: I1124 13:31:14.216400 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zl8gv" event={"ID":"e1abeea9-494a-4062-95a5-373ffaf1e3e9","Type":"ContainerDied","Data":"668a5cbd7b10a541385514ab8acf9363a8a1d278888cb9a34154e6c49c43e085"}
Nov 24 13:31:14 crc kubenswrapper[5039]: I1124 13:31:14.218710 5039 generic.go:334] "Generic (PLEG): container finished" podID="aee8c427-ee32-4093-8dca-4a5080d2e880" containerID="86afc39345f61e5b427d7ceb8cba8dc9710738087050c658c9fd21e17361fa19" exitCode=0
Nov 24 13:31:14 crc kubenswrapper[5039]: I1124 13:31:14.218743 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-km927" event={"ID":"aee8c427-ee32-4093-8dca-4a5080d2e880","Type":"ContainerDied","Data":"86afc39345f61e5b427d7ceb8cba8dc9710738087050c658c9fd21e17361fa19"}
Nov 24 13:31:15 crc kubenswrapper[5039]: I1124 13:31:15.337741 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-km927"
Nov 24 13:31:15 crc kubenswrapper[5039]: I1124 13:31:15.472252 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zxtzj\" (UniqueName: \"kubernetes.io/projected/aee8c427-ee32-4093-8dca-4a5080d2e880-kube-api-access-zxtzj\") pod \"aee8c427-ee32-4093-8dca-4a5080d2e880\" (UID: \"aee8c427-ee32-4093-8dca-4a5080d2e880\") "
Nov 24 13:31:15 crc kubenswrapper[5039]: I1124 13:31:15.472599 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aee8c427-ee32-4093-8dca-4a5080d2e880-catalog-content\") pod \"aee8c427-ee32-4093-8dca-4a5080d2e880\" (UID: \"aee8c427-ee32-4093-8dca-4a5080d2e880\") "
Nov 24 13:31:15 crc kubenswrapper[5039]: I1124 13:31:15.472702 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aee8c427-ee32-4093-8dca-4a5080d2e880-utilities\") pod \"aee8c427-ee32-4093-8dca-4a5080d2e880\" (UID: \"aee8c427-ee32-4093-8dca-4a5080d2e880\") "
Nov 24 13:31:15 crc kubenswrapper[5039]: I1124 13:31:15.474181 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aee8c427-ee32-4093-8dca-4a5080d2e880-utilities" (OuterVolumeSpecName: "utilities") pod "aee8c427-ee32-4093-8dca-4a5080d2e880" (UID: "aee8c427-ee32-4093-8dca-4a5080d2e880"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:31:15 crc kubenswrapper[5039]: I1124 13:31:15.479738 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aee8c427-ee32-4093-8dca-4a5080d2e880-kube-api-access-zxtzj" (OuterVolumeSpecName: "kube-api-access-zxtzj") pod "aee8c427-ee32-4093-8dca-4a5080d2e880" (UID: "aee8c427-ee32-4093-8dca-4a5080d2e880"). InnerVolumeSpecName "kube-api-access-zxtzj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:31:15 crc kubenswrapper[5039]: I1124 13:31:15.517052 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aee8c427-ee32-4093-8dca-4a5080d2e880-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "aee8c427-ee32-4093-8dca-4a5080d2e880" (UID: "aee8c427-ee32-4093-8dca-4a5080d2e880"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:31:15 crc kubenswrapper[5039]: I1124 13:31:15.574344 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aee8c427-ee32-4093-8dca-4a5080d2e880-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 13:31:15 crc kubenswrapper[5039]: I1124 13:31:15.574386 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zxtzj\" (UniqueName: \"kubernetes.io/projected/aee8c427-ee32-4093-8dca-4a5080d2e880-kube-api-access-zxtzj\") on node \"crc\" DevicePath \"\""
Nov 24 13:31:15 crc kubenswrapper[5039]: I1124 13:31:15.574400 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aee8c427-ee32-4093-8dca-4a5080d2e880-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 13:31:16 crc kubenswrapper[5039]: I1124 13:31:16.235254 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-km927" event={"ID":"aee8c427-ee32-4093-8dca-4a5080d2e880","Type":"ContainerDied","Data":"01e56228101231d69abd27b12bc990336dd2d45061e2f7db46e3c387b1fda3fe"}
Nov 24 13:31:16 crc kubenswrapper[5039]: I1124 13:31:16.235341 5039 scope.go:117] "RemoveContainer" containerID="86afc39345f61e5b427d7ceb8cba8dc9710738087050c658c9fd21e17361fa19"
Nov 24 13:31:16 crc kubenswrapper[5039]: I1124 13:31:16.235395 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-km927"
Nov 24 13:31:16 crc kubenswrapper[5039]: I1124 13:31:16.239212 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zl8gv" event={"ID":"e1abeea9-494a-4062-95a5-373ffaf1e3e9","Type":"ContainerStarted","Data":"6790e1927eceefb9e19e37f47136fc9fa8e706d391b8ce5adab04ec7bc665d67"}
Nov 24 13:31:16 crc kubenswrapper[5039]: I1124 13:31:16.262489 5039 scope.go:117] "RemoveContainer" containerID="c16c60fcd3290330fe0473f541b2708e1a3476c4ce084cc2446b9d556ef0c4fc"
Nov 24 13:31:16 crc kubenswrapper[5039]: I1124 13:31:16.271899 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zl8gv" podStartSLOduration=2.831059347 podStartE2EDuration="7.271872511s" podCreationTimestamp="2025-11-24 13:31:09 +0000 UTC" firstStartedPulling="2025-11-24 13:31:11.191363392 +0000 UTC m=+783.630487902" lastFinishedPulling="2025-11-24 13:31:15.632176556 +0000 UTC m=+788.071301066" observedRunningTime="2025-11-24 13:31:16.264909853 +0000 UTC m=+788.704034393" watchObservedRunningTime="2025-11-24 13:31:16.271872511 +0000 UTC m=+788.710997051"
Nov 24 13:31:16 crc kubenswrapper[5039]: I1124 13:31:16.286861 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-km927"]
Nov 24 13:31:16 crc kubenswrapper[5039]: I1124 13:31:16.296656 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-km927"]
Nov 24 13:31:16 crc kubenswrapper[5039]: I1124 13:31:16.300477 5039 scope.go:117] "RemoveContainer" containerID="fa359c05c4c92656b4df434dc7c8a98524fe7bc5063b03c1cbaafdddb83f7a2c"
Nov 24 13:31:16 crc kubenswrapper[5039]: I1124 13:31:16.316738 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aee8c427-ee32-4093-8dca-4a5080d2e880" path="/var/lib/kubelet/pods/aee8c427-ee32-4093-8dca-4a5080d2e880/volumes"
Nov 24 13:31:19 crc kubenswrapper[5039]: I1124 13:31:19.942467 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zl8gv"
Nov 24 13:31:19 crc kubenswrapper[5039]: I1124 13:31:19.943753 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zl8gv"
Nov 24 13:31:21 crc kubenswrapper[5039]: I1124 13:31:21.015379 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zl8gv" podUID="e1abeea9-494a-4062-95a5-373ffaf1e3e9" containerName="registry-server" probeResult="failure" output=<
Nov 24 13:31:21 crc kubenswrapper[5039]: timeout: failed to connect service ":50051" within 1s
Nov 24 13:31:21 crc kubenswrapper[5039]: >
Nov 24 13:31:23 crc kubenswrapper[5039]: I1124 13:31:23.864402 5039 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready
Nov 24 13:31:23 crc kubenswrapper[5039]: I1124 13:31:23.864838 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="ffdab614-73c1-4ac9-adba-d2ec7ce81550" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Nov 24 13:31:30 crc kubenswrapper[5039]: I1124 13:31:30.070190 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zl8gv"
Nov 24 13:31:30 crc kubenswrapper[5039]: I1124 13:31:30.109425 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zl8gv"
Nov 24 13:31:30 crc kubenswrapper[5039]: I1124 13:31:30.299766 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zl8gv"]
Nov 24 13:31:31 crc kubenswrapper[5039]: I1124 13:31:31.366977 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zl8gv" podUID="e1abeea9-494a-4062-95a5-373ffaf1e3e9" containerName="registry-server" containerID="cri-o://6790e1927eceefb9e19e37f47136fc9fa8e706d391b8ce5adab04ec7bc665d67" gracePeriod=2
Nov 24 13:31:31 crc kubenswrapper[5039]: I1124 13:31:31.837776 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zl8gv"
Nov 24 13:31:31 crc kubenswrapper[5039]: I1124 13:31:31.944822 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5j8kp\" (UniqueName: \"kubernetes.io/projected/e1abeea9-494a-4062-95a5-373ffaf1e3e9-kube-api-access-5j8kp\") pod \"e1abeea9-494a-4062-95a5-373ffaf1e3e9\" (UID: \"e1abeea9-494a-4062-95a5-373ffaf1e3e9\") "
Nov 24 13:31:31 crc kubenswrapper[5039]: I1124 13:31:31.944872 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1abeea9-494a-4062-95a5-373ffaf1e3e9-catalog-content\") pod \"e1abeea9-494a-4062-95a5-373ffaf1e3e9\" (UID: \"e1abeea9-494a-4062-95a5-373ffaf1e3e9\") "
Nov 24 13:31:31 crc kubenswrapper[5039]: I1124 13:31:31.944895 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1abeea9-494a-4062-95a5-373ffaf1e3e9-utilities\") pod \"e1abeea9-494a-4062-95a5-373ffaf1e3e9\" (UID: \"e1abeea9-494a-4062-95a5-373ffaf1e3e9\") "
Nov 24 13:31:31 crc kubenswrapper[5039]: I1124 13:31:31.945969 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e1abeea9-494a-4062-95a5-373ffaf1e3e9-utilities" (OuterVolumeSpecName: "utilities") pod "e1abeea9-494a-4062-95a5-373ffaf1e3e9" (UID: "e1abeea9-494a-4062-95a5-373ffaf1e3e9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:31:31 crc kubenswrapper[5039]: I1124 13:31:31.949629 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1abeea9-494a-4062-95a5-373ffaf1e3e9-kube-api-access-5j8kp" (OuterVolumeSpecName: "kube-api-access-5j8kp") pod "e1abeea9-494a-4062-95a5-373ffaf1e3e9" (UID: "e1abeea9-494a-4062-95a5-373ffaf1e3e9"). InnerVolumeSpecName "kube-api-access-5j8kp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:31:32 crc kubenswrapper[5039]: I1124 13:31:32.033705 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e1abeea9-494a-4062-95a5-373ffaf1e3e9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e1abeea9-494a-4062-95a5-373ffaf1e3e9" (UID: "e1abeea9-494a-4062-95a5-373ffaf1e3e9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:31:32 crc kubenswrapper[5039]: I1124 13:31:32.046403 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5j8kp\" (UniqueName: \"kubernetes.io/projected/e1abeea9-494a-4062-95a5-373ffaf1e3e9-kube-api-access-5j8kp\") on node \"crc\" DevicePath \"\""
Nov 24 13:31:32 crc kubenswrapper[5039]: I1124 13:31:32.046463 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1abeea9-494a-4062-95a5-373ffaf1e3e9-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 13:31:32 crc kubenswrapper[5039]: I1124 13:31:32.046483 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1abeea9-494a-4062-95a5-373ffaf1e3e9-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 13:31:32 crc kubenswrapper[5039]: I1124 13:31:32.374857 5039 generic.go:334] "Generic (PLEG): container finished" podID="e1abeea9-494a-4062-95a5-373ffaf1e3e9" containerID="6790e1927eceefb9e19e37f47136fc9fa8e706d391b8ce5adab04ec7bc665d67" exitCode=0
Nov 24 13:31:32 crc kubenswrapper[5039]: I1124 13:31:32.375012 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zl8gv" event={"ID":"e1abeea9-494a-4062-95a5-373ffaf1e3e9","Type":"ContainerDied","Data":"6790e1927eceefb9e19e37f47136fc9fa8e706d391b8ce5adab04ec7bc665d67"}
Nov 24 13:31:32 crc kubenswrapper[5039]: I1124 13:31:32.375706 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zl8gv" event={"ID":"e1abeea9-494a-4062-95a5-373ffaf1e3e9","Type":"ContainerDied","Data":"ab24bdfabb2b09edc238aefe047f139b5a6e0872efbf5a7beefd715015734b8d"}
Nov 24 13:31:32 crc kubenswrapper[5039]: I1124 13:31:32.375784 5039 scope.go:117] "RemoveContainer" containerID="6790e1927eceefb9e19e37f47136fc9fa8e706d391b8ce5adab04ec7bc665d67"
Nov 24 13:31:32 crc kubenswrapper[5039]: I1124 13:31:32.375115 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zl8gv"
Nov 24 13:31:32 crc kubenswrapper[5039]: I1124 13:31:32.398947 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zl8gv"]
Nov 24 13:31:32 crc kubenswrapper[5039]: I1124 13:31:32.402714 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zl8gv"]
Nov 24 13:31:32 crc kubenswrapper[5039]: I1124 13:31:32.407679 5039 scope.go:117] "RemoveContainer" containerID="668a5cbd7b10a541385514ab8acf9363a8a1d278888cb9a34154e6c49c43e085"
Nov 24 13:31:32 crc kubenswrapper[5039]: I1124 13:31:32.431541 5039 scope.go:117] "RemoveContainer" containerID="a1931f8c1a824d6bc3d9f373eba62cd26de7ba60c309612bb23b432fda4cd867"
Nov 24 13:31:32 crc kubenswrapper[5039]: I1124 13:31:32.456913 5039 scope.go:117] "RemoveContainer" containerID="6790e1927eceefb9e19e37f47136fc9fa8e706d391b8ce5adab04ec7bc665d67"
Nov 24 13:31:32 crc kubenswrapper[5039]: E1124 13:31:32.457315 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6790e1927eceefb9e19e37f47136fc9fa8e706d391b8ce5adab04ec7bc665d67\": container with ID starting with 6790e1927eceefb9e19e37f47136fc9fa8e706d391b8ce5adab04ec7bc665d67 not found: ID does not exist" containerID="6790e1927eceefb9e19e37f47136fc9fa8e706d391b8ce5adab04ec7bc665d67"
Nov 24 13:31:32 crc kubenswrapper[5039]: I1124 13:31:32.457344 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6790e1927eceefb9e19e37f47136fc9fa8e706d391b8ce5adab04ec7bc665d67"} err="failed to get container status \"6790e1927eceefb9e19e37f47136fc9fa8e706d391b8ce5adab04ec7bc665d67\": rpc error: code = NotFound desc = could not find container \"6790e1927eceefb9e19e37f47136fc9fa8e706d391b8ce5adab04ec7bc665d67\": container with ID starting with 6790e1927eceefb9e19e37f47136fc9fa8e706d391b8ce5adab04ec7bc665d67 not found: ID does not exist"
Nov 24 13:31:32 crc kubenswrapper[5039]: I1124 13:31:32.457365 5039 scope.go:117] "RemoveContainer" containerID="668a5cbd7b10a541385514ab8acf9363a8a1d278888cb9a34154e6c49c43e085"
Nov 24 13:31:32 crc kubenswrapper[5039]: E1124 13:31:32.457728 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"668a5cbd7b10a541385514ab8acf9363a8a1d278888cb9a34154e6c49c43e085\": container with ID starting with 668a5cbd7b10a541385514ab8acf9363a8a1d278888cb9a34154e6c49c43e085 not found: ID does not exist" containerID="668a5cbd7b10a541385514ab8acf9363a8a1d278888cb9a34154e6c49c43e085"
Nov 24 13:31:32 crc kubenswrapper[5039]: I1124 13:31:32.457856 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"668a5cbd7b10a541385514ab8acf9363a8a1d278888cb9a34154e6c49c43e085"} err="failed to get container status \"668a5cbd7b10a541385514ab8acf9363a8a1d278888cb9a34154e6c49c43e085\": rpc error: code = NotFound desc = could not find container \"668a5cbd7b10a541385514ab8acf9363a8a1d278888cb9a34154e6c49c43e085\": container with ID starting with 668a5cbd7b10a541385514ab8acf9363a8a1d278888cb9a34154e6c49c43e085 not found: ID does not exist"
Nov 24 13:31:32 crc kubenswrapper[5039]: I1124 13:31:32.457946 5039 scope.go:117] "RemoveContainer" containerID="a1931f8c1a824d6bc3d9f373eba62cd26de7ba60c309612bb23b432fda4cd867"
Nov 24 13:31:32 crc kubenswrapper[5039]: E1124 13:31:32.458272 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1931f8c1a824d6bc3d9f373eba62cd26de7ba60c309612bb23b432fda4cd867\": container with ID starting with a1931f8c1a824d6bc3d9f373eba62cd26de7ba60c309612bb23b432fda4cd867 not found: ID does not exist" containerID="a1931f8c1a824d6bc3d9f373eba62cd26de7ba60c309612bb23b432fda4cd867"
Nov 24 13:31:32 crc kubenswrapper[5039]: I1124 13:31:32.458294 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1931f8c1a824d6bc3d9f373eba62cd26de7ba60c309612bb23b432fda4cd867"} err="failed to get container status \"a1931f8c1a824d6bc3d9f373eba62cd26de7ba60c309612bb23b432fda4cd867\": rpc error: code = NotFound desc = could not find container \"a1931f8c1a824d6bc3d9f373eba62cd26de7ba60c309612bb23b432fda4cd867\": container with ID starting with a1931f8c1a824d6bc3d9f373eba62cd26de7ba60c309612bb23b432fda4cd867 not found: ID does not exist"
Nov 24 13:31:33 crc kubenswrapper[5039]: I1124 13:31:33.866037 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-ingester-0"
Nov 24 13:31:34 crc kubenswrapper[5039]: I1124 13:31:34.329360 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1abeea9-494a-4062-95a5-373ffaf1e3e9" path="/var/lib/kubelet/pods/e1abeea9-494a-4062-95a5-373ffaf1e3e9/volumes"
Nov 24 13:31:51 crc kubenswrapper[5039]: I1124 13:31:51.785937 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bvpmc"]
Nov 24 13:31:51 crc kubenswrapper[5039]: E1124 13:31:51.786698 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aee8c427-ee32-4093-8dca-4a5080d2e880" containerName="extract-content"
Nov 24 13:31:51 crc kubenswrapper[5039]: I1124 13:31:51.786710 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="aee8c427-ee32-4093-8dca-4a5080d2e880" containerName="extract-content"
Nov 24 13:31:51 crc kubenswrapper[5039]: E1124 13:31:51.786720 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1abeea9-494a-4062-95a5-373ffaf1e3e9" containerName="registry-server"
Nov 24 13:31:51 crc kubenswrapper[5039]: I1124 13:31:51.786726 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1abeea9-494a-4062-95a5-373ffaf1e3e9" containerName="registry-server"
Nov 24 13:31:51 crc kubenswrapper[5039]: E1124 13:31:51.786734 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aee8c427-ee32-4093-8dca-4a5080d2e880" containerName="extract-utilities"
Nov 24 13:31:51 crc kubenswrapper[5039]: I1124 13:31:51.786740 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="aee8c427-ee32-4093-8dca-4a5080d2e880" containerName="extract-utilities"
Nov 24 13:31:51 crc kubenswrapper[5039]: E1124 13:31:51.786750 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1abeea9-494a-4062-95a5-373ffaf1e3e9" containerName="extract-content"
Nov 24 13:31:51 crc kubenswrapper[5039]: I1124 13:31:51.786756 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1abeea9-494a-4062-95a5-373ffaf1e3e9" containerName="extract-content"
Nov 24 13:31:51 crc kubenswrapper[5039]: E1124 13:31:51.786766 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1abeea9-494a-4062-95a5-373ffaf1e3e9" containerName="extract-utilities"
Nov 24 13:31:51 crc kubenswrapper[5039]: I1124 13:31:51.786772 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1abeea9-494a-4062-95a5-373ffaf1e3e9" containerName="extract-utilities"
Nov 24 13:31:51 crc kubenswrapper[5039]: E1124 13:31:51.786785 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aee8c427-ee32-4093-8dca-4a5080d2e880" containerName="registry-server"
Nov 24 13:31:51 crc kubenswrapper[5039]: I1124 13:31:51.786791 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="aee8c427-ee32-4093-8dca-4a5080d2e880" containerName="registry-server"
Nov 24 13:31:51 crc kubenswrapper[5039]: I1124 13:31:51.786904 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="aee8c427-ee32-4093-8dca-4a5080d2e880" containerName="registry-server"
Nov 24 13:31:51 crc kubenswrapper[5039]: I1124 13:31:51.786915 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1abeea9-494a-4062-95a5-373ffaf1e3e9" containerName="registry-server"
Nov 24 13:31:51 crc kubenswrapper[5039]: I1124 13:31:51.787800 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bvpmc"
Nov 24 13:31:51 crc kubenswrapper[5039]: I1124 13:31:51.803941 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bvpmc"]
Nov 24 13:31:51 crc kubenswrapper[5039]: I1124 13:31:51.850378 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5dff4d9c-db94-4c31-be35-953d92db1d64-catalog-content\") pod \"community-operators-bvpmc\" (UID: \"5dff4d9c-db94-4c31-be35-953d92db1d64\") " pod="openshift-marketplace/community-operators-bvpmc"
Nov 24 13:31:51 crc kubenswrapper[5039]: I1124 13:31:51.850446 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvxbv\" (UniqueName: \"kubernetes.io/projected/5dff4d9c-db94-4c31-be35-953d92db1d64-kube-api-access-bvxbv\") pod \"community-operators-bvpmc\" (UID: \"5dff4d9c-db94-4c31-be35-953d92db1d64\") " pod="openshift-marketplace/community-operators-bvpmc"
Nov 24 13:31:51 crc kubenswrapper[5039]: I1124 13:31:51.850491 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5dff4d9c-db94-4c31-be35-953d92db1d64-utilities\") pod \"community-operators-bvpmc\" (UID: \"5dff4d9c-db94-4c31-be35-953d92db1d64\") " pod="openshift-marketplace/community-operators-bvpmc"
Nov 24 13:31:51 crc kubenswrapper[5039]: I1124 13:31:51.951380 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvxbv\" (UniqueName: \"kubernetes.io/projected/5dff4d9c-db94-4c31-be35-953d92db1d64-kube-api-access-bvxbv\") pod \"community-operators-bvpmc\" (UID: \"5dff4d9c-db94-4c31-be35-953d92db1d64\") " pod="openshift-marketplace/community-operators-bvpmc"
Nov 24 13:31:51 crc kubenswrapper[5039]: I1124 13:31:51.952117 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5dff4d9c-db94-4c31-be35-953d92db1d64-utilities\") pod \"community-operators-bvpmc\" (UID: \"5dff4d9c-db94-4c31-be35-953d92db1d64\") " pod="openshift-marketplace/community-operators-bvpmc"
Nov 24 13:31:51 crc kubenswrapper[5039]: I1124 13:31:51.952642 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5dff4d9c-db94-4c31-be35-953d92db1d64-utilities\") pod \"community-operators-bvpmc\" (UID: \"5dff4d9c-db94-4c31-be35-953d92db1d64\") " pod="openshift-marketplace/community-operators-bvpmc"
Nov 24 13:31:51 crc kubenswrapper[5039]: I1124 13:31:51.952822 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5dff4d9c-db94-4c31-be35-953d92db1d64-catalog-content\") pod \"community-operators-bvpmc\" (UID: \"5dff4d9c-db94-4c31-be35-953d92db1d64\") " pod="openshift-marketplace/community-operators-bvpmc"
Nov 24 13:31:51 crc kubenswrapper[5039]: I1124 13:31:51.953117 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5dff4d9c-db94-4c31-be35-953d92db1d64-catalog-content\") pod \"community-operators-bvpmc\" (UID: \"5dff4d9c-db94-4c31-be35-953d92db1d64\") " pod="openshift-marketplace/community-operators-bvpmc"
Nov 24 13:31:51 crc kubenswrapper[5039]: I1124 13:31:51.974390 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvxbv\" (UniqueName: \"kubernetes.io/projected/5dff4d9c-db94-4c31-be35-953d92db1d64-kube-api-access-bvxbv\") pod \"community-operators-bvpmc\" (UID: \"5dff4d9c-db94-4c31-be35-953d92db1d64\") " pod="openshift-marketplace/community-operators-bvpmc"
Nov 24 13:31:52 crc kubenswrapper[5039]: I1124 13:31:52.120524 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bvpmc"
Nov 24 13:31:52 crc kubenswrapper[5039]: I1124 13:31:52.596462 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bvpmc"]
Nov 24 13:31:52 crc kubenswrapper[5039]: I1124 13:31:52.964146 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-9klmk"]
Nov 24 13:31:52 crc kubenswrapper[5039]: I1124 13:31:52.965114 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-9klmk"
Nov 24 13:31:52 crc kubenswrapper[5039]: I1124 13:31:52.974383 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver"
Nov 24 13:31:52 crc kubenswrapper[5039]: I1124 13:31:52.975143 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config"
Nov 24 13:31:52 crc kubenswrapper[5039]: I1124 13:31:52.975520 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token"
Nov 24 13:31:52 crc kubenswrapper[5039]: I1124 13:31:52.975642 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-frqvk"
Nov 24 13:31:52 crc kubenswrapper[5039]: I1124 13:31:52.989878 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics"
Nov 24 13:31:52 crc kubenswrapper[5039]: I1124 13:31:52.993431 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.006357 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-9klmk"]
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.070271 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-entrypoint\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.070332 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-trusted-ca\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.070374 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/630e1a06-0245-4cef-a99e-3965272d6da5-tmp\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.070399 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bl4tn\" (UniqueName: \"kubernetes.io/projected/630e1a06-0245-4cef-a99e-3965272d6da5-kube-api-access-bl4tn\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.070430 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-collector-token\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.070454 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/630e1a06-0245-4cef-a99e-3965272d6da5-datadir\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.070483 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-metrics\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.070535 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-collector-syslog-receiver\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.070562 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-config-openshift-service-cacrt\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.070588 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-config\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.070610 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/630e1a06-0245-4cef-a99e-3965272d6da5-sa-token\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.164690 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-9klmk"]
Nov 24 13:31:53 crc kubenswrapper[5039]: E1124 13:31:53.165114 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[collector-syslog-receiver collector-token config config-openshift-service-cacrt datadir entrypoint kube-api-access-bl4tn metrics sa-token tmp trusted-ca], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-logging/collector-9klmk" podUID="630e1a06-0245-4cef-a99e-3965272d6da5"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.172255 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-entrypoint\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.172299 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-trusted-ca\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.172334 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/630e1a06-0245-4cef-a99e-3965272d6da5-tmp\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.172354 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bl4tn\" (UniqueName: \"kubernetes.io/projected/630e1a06-0245-4cef-a99e-3965272d6da5-kube-api-access-bl4tn\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.172381 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-collector-token\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.172403 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/630e1a06-0245-4cef-a99e-3965272d6da5-datadir\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.172426 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-metrics\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.172452 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-collector-syslog-receiver\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.172472 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-config-openshift-service-cacrt\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.172493 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-config\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.172524 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/630e1a06-0245-4cef-a99e-3965272d6da5-sa-token\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.173093 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/630e1a06-0245-4cef-a99e-3965272d6da5-datadir\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: E1124 13:31:53.173339 5039 secret.go:188] Couldn't get secret openshift-logging/collector-metrics: secret "collector-metrics" not found
Nov 24 13:31:53 crc kubenswrapper[5039]: E1124 13:31:53.173399 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-metrics podName:630e1a06-0245-4cef-a99e-3965272d6da5 nodeName:}" failed. No retries permitted until 2025-11-24 13:31:53.673377807 +0000 UTC m=+826.112502407 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics" (UniqueName: "kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-metrics") pod "collector-9klmk" (UID: "630e1a06-0245-4cef-a99e-3965272d6da5") : secret "collector-metrics" not found
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.173908 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-entrypoint\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.174805 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-trusted-ca\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.174753 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-config\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.174670 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-config-openshift-service-cacrt\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.179803 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-collector-token\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.180237 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-collector-syslog-receiver\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.189184 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bl4tn\" (UniqueName: \"kubernetes.io/projected/630e1a06-0245-4cef-a99e-3965272d6da5-kube-api-access-bl4tn\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.192091 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/630e1a06-0245-4cef-a99e-3965272d6da5-sa-token\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.196705 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/630e1a06-0245-4cef-a99e-3965272d6da5-tmp\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.521256 5039 generic.go:334] "Generic (PLEG): container finished" podID="5dff4d9c-db94-4c31-be35-953d92db1d64" containerID="e598bf47330b012afc97e89f5049ddc12a1685f28ed4c2f0bacbd355fb6e033c" exitCode=0
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.521368 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.522034 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bvpmc" event={"ID":"5dff4d9c-db94-4c31-be35-953d92db1d64","Type":"ContainerDied","Data":"e598bf47330b012afc97e89f5049ddc12a1685f28ed4c2f0bacbd355fb6e033c"}
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.522079 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bvpmc" event={"ID":"5dff4d9c-db94-4c31-be35-953d92db1d64","Type":"ContainerStarted","Data":"032799a824cdcb174697249024c0397f99ab579fa3cb583a8eb72d950875ea89"}
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.536816 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-9klmk"
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.577882 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-collector-syslog-receiver\") pod \"630e1a06-0245-4cef-a99e-3965272d6da5\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") "
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.577968 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/630e1a06-0245-4cef-a99e-3965272d6da5-sa-token\") pod \"630e1a06-0245-4cef-a99e-3965272d6da5\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") "
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.578010 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/630e1a06-0245-4cef-a99e-3965272d6da5-tmp\") pod \"630e1a06-0245-4cef-a99e-3965272d6da5\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") "
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.578059 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-entrypoint\") pod \"630e1a06-0245-4cef-a99e-3965272d6da5\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") "
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.578123 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-config\") pod \"630e1a06-0245-4cef-a99e-3965272d6da5\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") "
Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.578147 5039 reconciler_common.go:159]
"operationExecutor.UnmountVolume started for volume \"kube-api-access-bl4tn\" (UniqueName: \"kubernetes.io/projected/630e1a06-0245-4cef-a99e-3965272d6da5-kube-api-access-bl4tn\") pod \"630e1a06-0245-4cef-a99e-3965272d6da5\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.578184 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/630e1a06-0245-4cef-a99e-3965272d6da5-datadir\") pod \"630e1a06-0245-4cef-a99e-3965272d6da5\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.578216 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-config-openshift-service-cacrt\") pod \"630e1a06-0245-4cef-a99e-3965272d6da5\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.578232 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-collector-token\") pod \"630e1a06-0245-4cef-a99e-3965272d6da5\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.578251 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-trusted-ca\") pod \"630e1a06-0245-4cef-a99e-3965272d6da5\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.578538 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/630e1a06-0245-4cef-a99e-3965272d6da5-datadir" (OuterVolumeSpecName: "datadir") pod "630e1a06-0245-4cef-a99e-3965272d6da5" (UID: "630e1a06-0245-4cef-a99e-3965272d6da5"). InnerVolumeSpecName "datadir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.578958 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-entrypoint" (OuterVolumeSpecName: "entrypoint") pod "630e1a06-0245-4cef-a99e-3965272d6da5" (UID: "630e1a06-0245-4cef-a99e-3965272d6da5"). InnerVolumeSpecName "entrypoint". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.579004 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "630e1a06-0245-4cef-a99e-3965272d6da5" (UID: "630e1a06-0245-4cef-a99e-3965272d6da5"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.579320 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-config-openshift-service-cacrt" (OuterVolumeSpecName: "config-openshift-service-cacrt") pod "630e1a06-0245-4cef-a99e-3965272d6da5" (UID: "630e1a06-0245-4cef-a99e-3965272d6da5"). InnerVolumeSpecName "config-openshift-service-cacrt". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.579347 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-config" (OuterVolumeSpecName: "config") pod "630e1a06-0245-4cef-a99e-3965272d6da5" (UID: "630e1a06-0245-4cef-a99e-3965272d6da5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.581886 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-collector-token" (OuterVolumeSpecName: "collector-token") pod "630e1a06-0245-4cef-a99e-3965272d6da5" (UID: "630e1a06-0245-4cef-a99e-3965272d6da5"). InnerVolumeSpecName "collector-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.582558 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/630e1a06-0245-4cef-a99e-3965272d6da5-tmp" (OuterVolumeSpecName: "tmp") pod "630e1a06-0245-4cef-a99e-3965272d6da5" (UID: "630e1a06-0245-4cef-a99e-3965272d6da5"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.582857 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-collector-syslog-receiver" (OuterVolumeSpecName: "collector-syslog-receiver") pod "630e1a06-0245-4cef-a99e-3965272d6da5" (UID: "630e1a06-0245-4cef-a99e-3965272d6da5"). InnerVolumeSpecName "collector-syslog-receiver". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.585519 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/630e1a06-0245-4cef-a99e-3965272d6da5-sa-token" (OuterVolumeSpecName: "sa-token") pod "630e1a06-0245-4cef-a99e-3965272d6da5" (UID: "630e1a06-0245-4cef-a99e-3965272d6da5"). InnerVolumeSpecName "sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.593770 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/630e1a06-0245-4cef-a99e-3965272d6da5-kube-api-access-bl4tn" (OuterVolumeSpecName: "kube-api-access-bl4tn") pod "630e1a06-0245-4cef-a99e-3965272d6da5" (UID: "630e1a06-0245-4cef-a99e-3965272d6da5"). InnerVolumeSpecName "kube-api-access-bl4tn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.679674 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-metrics\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.679829 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.679847 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bl4tn\" (UniqueName: \"kubernetes.io/projected/630e1a06-0245-4cef-a99e-3965272d6da5-kube-api-access-bl4tn\") on node \"crc\" DevicePath \"\"" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.679861 5039 reconciler_common.go:293] "Volume detached for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/630e1a06-0245-4cef-a99e-3965272d6da5-datadir\") on node \"crc\" DevicePath \"\"" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.679873 5039 reconciler_common.go:293] "Volume detached for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-config-openshift-service-cacrt\") on node \"crc\" DevicePath \"\"" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.679886 5039 reconciler_common.go:293] "Volume detached for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-collector-token\") on node \"crc\" DevicePath \"\"" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.679898 5039 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.679910 5039 reconciler_common.go:293] "Volume detached for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-collector-syslog-receiver\") on node \"crc\" DevicePath \"\"" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.679921 5039 reconciler_common.go:293] "Volume detached for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/630e1a06-0245-4cef-a99e-3965272d6da5-sa-token\") on node \"crc\" DevicePath \"\"" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.679931 5039 reconciler_common.go:293] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/630e1a06-0245-4cef-a99e-3965272d6da5-tmp\") on node \"crc\" DevicePath \"\"" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.679942 5039 reconciler_common.go:293] "Volume detached for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/630e1a06-0245-4cef-a99e-3965272d6da5-entrypoint\") on node \"crc\" DevicePath \"\"" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.682552 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-metrics\") pod \"collector-9klmk\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " pod="openshift-logging/collector-9klmk" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.781564 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-metrics\") pod \"630e1a06-0245-4cef-a99e-3965272d6da5\" (UID: \"630e1a06-0245-4cef-a99e-3965272d6da5\") " Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.786035 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-metrics" (OuterVolumeSpecName: "metrics") pod "630e1a06-0245-4cef-a99e-3965272d6da5" (UID: "630e1a06-0245-4cef-a99e-3965272d6da5"). InnerVolumeSpecName "metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:31:53 crc kubenswrapper[5039]: I1124 13:31:53.883692 5039 reconciler_common.go:293] "Volume detached for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/630e1a06-0245-4cef-a99e-3965272d6da5-metrics\") on node \"crc\" DevicePath \"\"" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.529175 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-9klmk" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.591248 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-9klmk"] Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.602560 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-logging/collector-9klmk"] Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.613378 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-cqptg"] Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.614682 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.628723 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.628749 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.629039 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.629275 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-frqvk" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.629984 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.631132 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-cqptg"] Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.633162 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.697196 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69rm9\" (UniqueName: \"kubernetes.io/projected/a477b3d9-ef5d-4254-bc37-62f62a3ac851-kube-api-access-69rm9\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.697252 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: 
\"kubernetes.io/host-path/a477b3d9-ef5d-4254-bc37-62f62a3ac851-datadir\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.697282 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/a477b3d9-ef5d-4254-bc37-62f62a3ac851-config-openshift-service-cacrt\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.697300 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a477b3d9-ef5d-4254-bc37-62f62a3ac851-trusted-ca\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.697317 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a477b3d9-ef5d-4254-bc37-62f62a3ac851-config\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.697360 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/a477b3d9-ef5d-4254-bc37-62f62a3ac851-sa-token\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.697387 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/a477b3d9-ef5d-4254-bc37-62f62a3ac851-collector-syslog-receiver\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.697411 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/a477b3d9-ef5d-4254-bc37-62f62a3ac851-metrics\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.697451 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/a477b3d9-ef5d-4254-bc37-62f62a3ac851-tmp\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.697471 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/a477b3d9-ef5d-4254-bc37-62f62a3ac851-entrypoint\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.697540 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/a477b3d9-ef5d-4254-bc37-62f62a3ac851-collector-token\") pod 
\"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.798692 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/a477b3d9-ef5d-4254-bc37-62f62a3ac851-datadir\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.799165 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/a477b3d9-ef5d-4254-bc37-62f62a3ac851-config-openshift-service-cacrt\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.798839 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/a477b3d9-ef5d-4254-bc37-62f62a3ac851-datadir\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.799228 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a477b3d9-ef5d-4254-bc37-62f62a3ac851-trusted-ca\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.799284 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a477b3d9-ef5d-4254-bc37-62f62a3ac851-config\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.799392 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/a477b3d9-ef5d-4254-bc37-62f62a3ac851-sa-token\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.799457 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/a477b3d9-ef5d-4254-bc37-62f62a3ac851-collector-syslog-receiver\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.799542 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/a477b3d9-ef5d-4254-bc37-62f62a3ac851-metrics\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.799801 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/a477b3d9-ef5d-4254-bc37-62f62a3ac851-entrypoint\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.799849 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" 
(UniqueName: \"kubernetes.io/empty-dir/a477b3d9-ef5d-4254-bc37-62f62a3ac851-tmp\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.799894 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/a477b3d9-ef5d-4254-bc37-62f62a3ac851-collector-token\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.799984 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69rm9\" (UniqueName: \"kubernetes.io/projected/a477b3d9-ef5d-4254-bc37-62f62a3ac851-kube-api-access-69rm9\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.800081 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/a477b3d9-ef5d-4254-bc37-62f62a3ac851-config-openshift-service-cacrt\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.800248 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a477b3d9-ef5d-4254-bc37-62f62a3ac851-config\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.800821 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/a477b3d9-ef5d-4254-bc37-62f62a3ac851-entrypoint\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.801038 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a477b3d9-ef5d-4254-bc37-62f62a3ac851-trusted-ca\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.805375 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/a477b3d9-ef5d-4254-bc37-62f62a3ac851-metrics\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.806877 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/a477b3d9-ef5d-4254-bc37-62f62a3ac851-collector-syslog-receiver\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.807028 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/a477b3d9-ef5d-4254-bc37-62f62a3ac851-tmp\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.812414 5039 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/a477b3d9-ef5d-4254-bc37-62f62a3ac851-collector-token\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.821129 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69rm9\" (UniqueName: \"kubernetes.io/projected/a477b3d9-ef5d-4254-bc37-62f62a3ac851-kube-api-access-69rm9\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.824971 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/a477b3d9-ef5d-4254-bc37-62f62a3ac851-sa-token\") pod \"collector-cqptg\" (UID: \"a477b3d9-ef5d-4254-bc37-62f62a3ac851\") " pod="openshift-logging/collector-cqptg" Nov 24 13:31:54 crc kubenswrapper[5039]: I1124 13:31:54.940401 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-cqptg" Nov 24 13:31:55 crc kubenswrapper[5039]: I1124 13:31:55.362712 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-cqptg"] Nov 24 13:31:55 crc kubenswrapper[5039]: W1124 13:31:55.372214 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda477b3d9_ef5d_4254_bc37_62f62a3ac851.slice/crio-c0f02d25ce5cfa4ccc5719e116f9e7209da8cbe130e76e18c9cc58b2a6c39dce WatchSource:0}: Error finding container c0f02d25ce5cfa4ccc5719e116f9e7209da8cbe130e76e18c9cc58b2a6c39dce: Status 404 returned error can't find the container with id c0f02d25ce5cfa4ccc5719e116f9e7209da8cbe130e76e18c9cc58b2a6c39dce Nov 24 13:31:55 crc kubenswrapper[5039]: I1124 13:31:55.539036 5039 generic.go:334] "Generic (PLEG): container finished" podID="5dff4d9c-db94-4c31-be35-953d92db1d64" containerID="1227c72b19d65dc88e9879ee84f72735c5d2dea1187eb18a062f33e9ee10e92c" exitCode=0 Nov 24 13:31:55 crc kubenswrapper[5039]: I1124 13:31:55.539140 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bvpmc" event={"ID":"5dff4d9c-db94-4c31-be35-953d92db1d64","Type":"ContainerDied","Data":"1227c72b19d65dc88e9879ee84f72735c5d2dea1187eb18a062f33e9ee10e92c"} Nov 24 13:31:55 crc kubenswrapper[5039]: I1124 13:31:55.540688 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-cqptg" event={"ID":"a477b3d9-ef5d-4254-bc37-62f62a3ac851","Type":"ContainerStarted","Data":"c0f02d25ce5cfa4ccc5719e116f9e7209da8cbe130e76e18c9cc58b2a6c39dce"} Nov 24 13:31:56 crc kubenswrapper[5039]: I1124 13:31:56.483270 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="630e1a06-0245-4cef-a99e-3965272d6da5" path="/var/lib/kubelet/pods/630e1a06-0245-4cef-a99e-3965272d6da5/volumes" Nov 24 13:31:57 crc kubenswrapper[5039]: I1124 13:31:57.557643 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bvpmc" event={"ID":"5dff4d9c-db94-4c31-be35-953d92db1d64","Type":"ContainerStarted","Data":"6fb444af8f68bfa1fbd953f1170388725ee6e55f517ddb7141cdf678a8dec363"} Nov 24 13:31:57 crc kubenswrapper[5039]: I1124 13:31:57.583984 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bvpmc" 
podStartSLOduration=3.165260501 podStartE2EDuration="6.583965275s" podCreationTimestamp="2025-11-24 13:31:51 +0000 UTC" firstStartedPulling="2025-11-24 13:31:53.526688333 +0000 UTC m=+825.965812873" lastFinishedPulling="2025-11-24 13:31:56.945393107 +0000 UTC m=+829.384517647" observedRunningTime="2025-11-24 13:31:57.580427069 +0000 UTC m=+830.019551589" watchObservedRunningTime="2025-11-24 13:31:57.583965275 +0000 UTC m=+830.023089775" Nov 24 13:32:02 crc kubenswrapper[5039]: I1124 13:32:02.122056 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bvpmc" Nov 24 13:32:02 crc kubenswrapper[5039]: I1124 13:32:02.122641 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bvpmc" Nov 24 13:32:02 crc kubenswrapper[5039]: I1124 13:32:02.162560 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bvpmc" Nov 24 13:32:02 crc kubenswrapper[5039]: I1124 13:32:02.639045 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bvpmc" Nov 24 13:32:02 crc kubenswrapper[5039]: I1124 13:32:02.679671 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bvpmc"] Nov 24 13:32:03 crc kubenswrapper[5039]: I1124 13:32:03.606094 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-cqptg" event={"ID":"a477b3d9-ef5d-4254-bc37-62f62a3ac851","Type":"ContainerStarted","Data":"e65c9b8920b64039d86a8b4017aea73729d3b7ad088454f28648c5f538e4bcd9"} Nov 24 13:32:04 crc kubenswrapper[5039]: I1124 13:32:04.616410 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bvpmc" podUID="5dff4d9c-db94-4c31-be35-953d92db1d64" containerName="registry-server" containerID="cri-o://6fb444af8f68bfa1fbd953f1170388725ee6e55f517ddb7141cdf678a8dec363" gracePeriod=2 Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.075439 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bvpmc" Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.094681 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/collector-cqptg" podStartSLOduration=3.348591392 podStartE2EDuration="11.094663094s" podCreationTimestamp="2025-11-24 13:31:54 +0000 UTC" firstStartedPulling="2025-11-24 13:31:55.374294241 +0000 UTC m=+827.813418741" lastFinishedPulling="2025-11-24 13:32:03.120365933 +0000 UTC m=+835.559490443" observedRunningTime="2025-11-24 13:32:03.625837014 +0000 UTC m=+836.064961514" watchObservedRunningTime="2025-11-24 13:32:05.094663094 +0000 UTC m=+837.533787614" Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.162751 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bvxbv\" (UniqueName: \"kubernetes.io/projected/5dff4d9c-db94-4c31-be35-953d92db1d64-kube-api-access-bvxbv\") pod \"5dff4d9c-db94-4c31-be35-953d92db1d64\" (UID: \"5dff4d9c-db94-4c31-be35-953d92db1d64\") " Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.163722 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5dff4d9c-db94-4c31-be35-953d92db1d64-utilities\") pod \"5dff4d9c-db94-4c31-be35-953d92db1d64\" (UID: \"5dff4d9c-db94-4c31-be35-953d92db1d64\") " Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.163833 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5dff4d9c-db94-4c31-be35-953d92db1d64-catalog-content\") pod \"5dff4d9c-db94-4c31-be35-953d92db1d64\" (UID: \"5dff4d9c-db94-4c31-be35-953d92db1d64\") " Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.165812 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5dff4d9c-db94-4c31-be35-953d92db1d64-utilities" (OuterVolumeSpecName: "utilities") pod "5dff4d9c-db94-4c31-be35-953d92db1d64" (UID: "5dff4d9c-db94-4c31-be35-953d92db1d64"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.169430 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5dff4d9c-db94-4c31-be35-953d92db1d64-kube-api-access-bvxbv" (OuterVolumeSpecName: "kube-api-access-bvxbv") pod "5dff4d9c-db94-4c31-be35-953d92db1d64" (UID: "5dff4d9c-db94-4c31-be35-953d92db1d64"). InnerVolumeSpecName "kube-api-access-bvxbv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.228324 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5dff4d9c-db94-4c31-be35-953d92db1d64-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5dff4d9c-db94-4c31-be35-953d92db1d64" (UID: "5dff4d9c-db94-4c31-be35-953d92db1d64"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.266215 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5dff4d9c-db94-4c31-be35-953d92db1d64-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.266540 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bvxbv\" (UniqueName: \"kubernetes.io/projected/5dff4d9c-db94-4c31-be35-953d92db1d64-kube-api-access-bvxbv\") on node \"crc\" DevicePath \"\"" Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.266559 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5dff4d9c-db94-4c31-be35-953d92db1d64-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.625259 5039 generic.go:334] "Generic (PLEG): container finished" podID="5dff4d9c-db94-4c31-be35-953d92db1d64" containerID="6fb444af8f68bfa1fbd953f1170388725ee6e55f517ddb7141cdf678a8dec363" exitCode=0 Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.625302 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bvpmc" event={"ID":"5dff4d9c-db94-4c31-be35-953d92db1d64","Type":"ContainerDied","Data":"6fb444af8f68bfa1fbd953f1170388725ee6e55f517ddb7141cdf678a8dec363"} Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.625315 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bvpmc" Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.625331 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bvpmc" event={"ID":"5dff4d9c-db94-4c31-be35-953d92db1d64","Type":"ContainerDied","Data":"032799a824cdcb174697249024c0397f99ab579fa3cb583a8eb72d950875ea89"} Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.625348 5039 scope.go:117] "RemoveContainer" containerID="6fb444af8f68bfa1fbd953f1170388725ee6e55f517ddb7141cdf678a8dec363" Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.646184 5039 scope.go:117] "RemoveContainer" containerID="1227c72b19d65dc88e9879ee84f72735c5d2dea1187eb18a062f33e9ee10e92c" Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.668461 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bvpmc"] Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.675309 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bvpmc"] Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.693953 5039 scope.go:117] "RemoveContainer" containerID="e598bf47330b012afc97e89f5049ddc12a1685f28ed4c2f0bacbd355fb6e033c" Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.712418 5039 scope.go:117] "RemoveContainer" containerID="6fb444af8f68bfa1fbd953f1170388725ee6e55f517ddb7141cdf678a8dec363" Nov 24 13:32:05 crc kubenswrapper[5039]: E1124 13:32:05.712856 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6fb444af8f68bfa1fbd953f1170388725ee6e55f517ddb7141cdf678a8dec363\": container with ID starting with 6fb444af8f68bfa1fbd953f1170388725ee6e55f517ddb7141cdf678a8dec363 not found: ID does not exist" containerID="6fb444af8f68bfa1fbd953f1170388725ee6e55f517ddb7141cdf678a8dec363" Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.712898 
5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fb444af8f68bfa1fbd953f1170388725ee6e55f517ddb7141cdf678a8dec363"} err="failed to get container status \"6fb444af8f68bfa1fbd953f1170388725ee6e55f517ddb7141cdf678a8dec363\": rpc error: code = NotFound desc = could not find container \"6fb444af8f68bfa1fbd953f1170388725ee6e55f517ddb7141cdf678a8dec363\": container with ID starting with 6fb444af8f68bfa1fbd953f1170388725ee6e55f517ddb7141cdf678a8dec363 not found: ID does not exist" Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.712942 5039 scope.go:117] "RemoveContainer" containerID="1227c72b19d65dc88e9879ee84f72735c5d2dea1187eb18a062f33e9ee10e92c" Nov 24 13:32:05 crc kubenswrapper[5039]: E1124 13:32:05.713283 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1227c72b19d65dc88e9879ee84f72735c5d2dea1187eb18a062f33e9ee10e92c\": container with ID starting with 1227c72b19d65dc88e9879ee84f72735c5d2dea1187eb18a062f33e9ee10e92c not found: ID does not exist" containerID="1227c72b19d65dc88e9879ee84f72735c5d2dea1187eb18a062f33e9ee10e92c" Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.713326 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1227c72b19d65dc88e9879ee84f72735c5d2dea1187eb18a062f33e9ee10e92c"} err="failed to get container status \"1227c72b19d65dc88e9879ee84f72735c5d2dea1187eb18a062f33e9ee10e92c\": rpc error: code = NotFound desc = could not find container \"1227c72b19d65dc88e9879ee84f72735c5d2dea1187eb18a062f33e9ee10e92c\": container with ID starting with 1227c72b19d65dc88e9879ee84f72735c5d2dea1187eb18a062f33e9ee10e92c not found: ID does not exist" Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.713339 5039 scope.go:117] "RemoveContainer" containerID="e598bf47330b012afc97e89f5049ddc12a1685f28ed4c2f0bacbd355fb6e033c" Nov 24 13:32:05 crc kubenswrapper[5039]: E1124 13:32:05.713804 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e598bf47330b012afc97e89f5049ddc12a1685f28ed4c2f0bacbd355fb6e033c\": container with ID starting with e598bf47330b012afc97e89f5049ddc12a1685f28ed4c2f0bacbd355fb6e033c not found: ID does not exist" containerID="e598bf47330b012afc97e89f5049ddc12a1685f28ed4c2f0bacbd355fb6e033c" Nov 24 13:32:05 crc kubenswrapper[5039]: I1124 13:32:05.713827 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e598bf47330b012afc97e89f5049ddc12a1685f28ed4c2f0bacbd355fb6e033c"} err="failed to get container status \"e598bf47330b012afc97e89f5049ddc12a1685f28ed4c2f0bacbd355fb6e033c\": rpc error: code = NotFound desc = could not find container \"e598bf47330b012afc97e89f5049ddc12a1685f28ed4c2f0bacbd355fb6e033c\": container with ID starting with e598bf47330b012afc97e89f5049ddc12a1685f28ed4c2f0bacbd355fb6e033c not found: ID does not exist" Nov 24 13:32:06 crc kubenswrapper[5039]: I1124 13:32:06.323385 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5dff4d9c-db94-4c31-be35-953d92db1d64" path="/var/lib/kubelet/pods/5dff4d9c-db94-4c31-be35-953d92db1d64/volumes" Nov 24 13:32:26 crc kubenswrapper[5039]: I1124 13:32:26.942289 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb"] Nov 24 13:32:26 crc kubenswrapper[5039]: E1124 13:32:26.943086 5039 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="5dff4d9c-db94-4c31-be35-953d92db1d64" containerName="extract-content" Nov 24 13:32:26 crc kubenswrapper[5039]: I1124 13:32:26.943100 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dff4d9c-db94-4c31-be35-953d92db1d64" containerName="extract-content" Nov 24 13:32:26 crc kubenswrapper[5039]: E1124 13:32:26.943112 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dff4d9c-db94-4c31-be35-953d92db1d64" containerName="registry-server" Nov 24 13:32:26 crc kubenswrapper[5039]: I1124 13:32:26.943120 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dff4d9c-db94-4c31-be35-953d92db1d64" containerName="registry-server" Nov 24 13:32:26 crc kubenswrapper[5039]: E1124 13:32:26.943150 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dff4d9c-db94-4c31-be35-953d92db1d64" containerName="extract-utilities" Nov 24 13:32:26 crc kubenswrapper[5039]: I1124 13:32:26.943158 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dff4d9c-db94-4c31-be35-953d92db1d64" containerName="extract-utilities" Nov 24 13:32:26 crc kubenswrapper[5039]: I1124 13:32:26.943279 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="5dff4d9c-db94-4c31-be35-953d92db1d64" containerName="registry-server" Nov 24 13:32:26 crc kubenswrapper[5039]: I1124 13:32:26.944386 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb" Nov 24 13:32:26 crc kubenswrapper[5039]: I1124 13:32:26.946061 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 24 13:32:26 crc kubenswrapper[5039]: I1124 13:32:26.952434 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb"] Nov 24 13:32:27 crc kubenswrapper[5039]: I1124 13:32:27.112317 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/175053d9-6995-4edc-9e0b-a72a0e10ae72-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb\" (UID: \"175053d9-6995-4edc-9e0b-a72a0e10ae72\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb" Nov 24 13:32:27 crc kubenswrapper[5039]: I1124 13:32:27.112351 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/175053d9-6995-4edc-9e0b-a72a0e10ae72-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb\" (UID: \"175053d9-6995-4edc-9e0b-a72a0e10ae72\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb" Nov 24 13:32:27 crc kubenswrapper[5039]: I1124 13:32:27.112387 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rw6ln\" (UniqueName: \"kubernetes.io/projected/175053d9-6995-4edc-9e0b-a72a0e10ae72-kube-api-access-rw6ln\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb\" (UID: \"175053d9-6995-4edc-9e0b-a72a0e10ae72\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb" Nov 24 13:32:27 crc kubenswrapper[5039]: I1124 13:32:27.213606 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/175053d9-6995-4edc-9e0b-a72a0e10ae72-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb\" (UID: \"175053d9-6995-4edc-9e0b-a72a0e10ae72\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb" Nov 24 13:32:27 crc kubenswrapper[5039]: I1124 13:32:27.213662 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/175053d9-6995-4edc-9e0b-a72a0e10ae72-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb\" (UID: \"175053d9-6995-4edc-9e0b-a72a0e10ae72\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb" Nov 24 13:32:27 crc kubenswrapper[5039]: I1124 13:32:27.213718 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rw6ln\" (UniqueName: \"kubernetes.io/projected/175053d9-6995-4edc-9e0b-a72a0e10ae72-kube-api-access-rw6ln\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb\" (UID: \"175053d9-6995-4edc-9e0b-a72a0e10ae72\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb" Nov 24 13:32:27 crc kubenswrapper[5039]: I1124 13:32:27.214183 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/175053d9-6995-4edc-9e0b-a72a0e10ae72-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb\" (UID: \"175053d9-6995-4edc-9e0b-a72a0e10ae72\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb" Nov 24 13:32:27 crc kubenswrapper[5039]: I1124 13:32:27.214230 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/175053d9-6995-4edc-9e0b-a72a0e10ae72-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb\" (UID: \"175053d9-6995-4edc-9e0b-a72a0e10ae72\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb" Nov 24 13:32:27 crc kubenswrapper[5039]: I1124 13:32:27.235269 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rw6ln\" (UniqueName: \"kubernetes.io/projected/175053d9-6995-4edc-9e0b-a72a0e10ae72-kube-api-access-rw6ln\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb\" (UID: \"175053d9-6995-4edc-9e0b-a72a0e10ae72\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb" Nov 24 13:32:27 crc kubenswrapper[5039]: I1124 13:32:27.317469 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb"
Nov 24 13:32:27 crc kubenswrapper[5039]: I1124 13:32:27.764639 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb"]
Nov 24 13:32:27 crc kubenswrapper[5039]: I1124 13:32:27.796771 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb" event={"ID":"175053d9-6995-4edc-9e0b-a72a0e10ae72","Type":"ContainerStarted","Data":"e2e17dcdd2f9217890f53760541136b3c9f1662aa804ca10a9f23836a6a0a77a"}
Nov 24 13:32:28 crc kubenswrapper[5039]: I1124 13:32:28.804990 5039 generic.go:334] "Generic (PLEG): container finished" podID="175053d9-6995-4edc-9e0b-a72a0e10ae72" containerID="6972dc122b03934be19f15c7e3038ac4990c8c6d7f464ee5da3f162dce7ff95c" exitCode=0
Nov 24 13:32:28 crc kubenswrapper[5039]: I1124 13:32:28.805041 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb" event={"ID":"175053d9-6995-4edc-9e0b-a72a0e10ae72","Type":"ContainerDied","Data":"6972dc122b03934be19f15c7e3038ac4990c8c6d7f464ee5da3f162dce7ff95c"}
Nov 24 13:32:31 crc kubenswrapper[5039]: I1124 13:32:31.827398 5039 generic.go:334] "Generic (PLEG): container finished" podID="175053d9-6995-4edc-9e0b-a72a0e10ae72" containerID="5d8b27cbd025ce01cfb03950b46ac72b462377f30a7b4afd75da6fd416690572" exitCode=0
Nov 24 13:32:31 crc kubenswrapper[5039]: I1124 13:32:31.827452 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb" event={"ID":"175053d9-6995-4edc-9e0b-a72a0e10ae72","Type":"ContainerDied","Data":"5d8b27cbd025ce01cfb03950b46ac72b462377f30a7b4afd75da6fd416690572"}
Nov 24 13:32:32 crc kubenswrapper[5039]: I1124 13:32:32.836495 5039 generic.go:334] "Generic (PLEG): container finished" podID="175053d9-6995-4edc-9e0b-a72a0e10ae72" containerID="9fb0d639f8516187240d6459452c7f9486ec698084029b10582199e7c26315d1" exitCode=0
Nov 24 13:32:32 crc kubenswrapper[5039]: I1124 13:32:32.836564 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb" event={"ID":"175053d9-6995-4edc-9e0b-a72a0e10ae72","Type":"ContainerDied","Data":"9fb0d639f8516187240d6459452c7f9486ec698084029b10582199e7c26315d1"}
Nov 24 13:32:34 crc kubenswrapper[5039]: I1124 13:32:34.202123 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb"
Nov 24 13:32:34 crc kubenswrapper[5039]: I1124 13:32:34.330185 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/175053d9-6995-4edc-9e0b-a72a0e10ae72-util\") pod \"175053d9-6995-4edc-9e0b-a72a0e10ae72\" (UID: \"175053d9-6995-4edc-9e0b-a72a0e10ae72\") "
Nov 24 13:32:34 crc kubenswrapper[5039]: I1124 13:32:34.330631 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rw6ln\" (UniqueName: \"kubernetes.io/projected/175053d9-6995-4edc-9e0b-a72a0e10ae72-kube-api-access-rw6ln\") pod \"175053d9-6995-4edc-9e0b-a72a0e10ae72\" (UID: \"175053d9-6995-4edc-9e0b-a72a0e10ae72\") "
Nov 24 13:32:34 crc kubenswrapper[5039]: I1124 13:32:34.330717 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/175053d9-6995-4edc-9e0b-a72a0e10ae72-bundle\") pod \"175053d9-6995-4edc-9e0b-a72a0e10ae72\" (UID: \"175053d9-6995-4edc-9e0b-a72a0e10ae72\") "
Nov 24 13:32:34 crc kubenswrapper[5039]: I1124 13:32:34.331276 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/175053d9-6995-4edc-9e0b-a72a0e10ae72-bundle" (OuterVolumeSpecName: "bundle") pod "175053d9-6995-4edc-9e0b-a72a0e10ae72" (UID: "175053d9-6995-4edc-9e0b-a72a0e10ae72"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:32:34 crc kubenswrapper[5039]: I1124 13:32:34.336678 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/175053d9-6995-4edc-9e0b-a72a0e10ae72-kube-api-access-rw6ln" (OuterVolumeSpecName: "kube-api-access-rw6ln") pod "175053d9-6995-4edc-9e0b-a72a0e10ae72" (UID: "175053d9-6995-4edc-9e0b-a72a0e10ae72"). InnerVolumeSpecName "kube-api-access-rw6ln". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:32:34 crc kubenswrapper[5039]: I1124 13:32:34.340912 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/175053d9-6995-4edc-9e0b-a72a0e10ae72-util" (OuterVolumeSpecName: "util") pod "175053d9-6995-4edc-9e0b-a72a0e10ae72" (UID: "175053d9-6995-4edc-9e0b-a72a0e10ae72"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:32:34 crc kubenswrapper[5039]: I1124 13:32:34.432938 5039 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/175053d9-6995-4edc-9e0b-a72a0e10ae72-util\") on node \"crc\" DevicePath \"\""
Nov 24 13:32:34 crc kubenswrapper[5039]: I1124 13:32:34.432975 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rw6ln\" (UniqueName: \"kubernetes.io/projected/175053d9-6995-4edc-9e0b-a72a0e10ae72-kube-api-access-rw6ln\") on node \"crc\" DevicePath \"\""
Nov 24 13:32:34 crc kubenswrapper[5039]: I1124 13:32:34.432987 5039 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/175053d9-6995-4edc-9e0b-a72a0e10ae72-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 13:32:34 crc kubenswrapper[5039]: I1124 13:32:34.852303 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb" event={"ID":"175053d9-6995-4edc-9e0b-a72a0e10ae72","Type":"ContainerDied","Data":"e2e17dcdd2f9217890f53760541136b3c9f1662aa804ca10a9f23836a6a0a77a"}
Nov 24 13:32:34 crc kubenswrapper[5039]: I1124 13:32:34.852367 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e2e17dcdd2f9217890f53760541136b3c9f1662aa804ca10a9f23836a6a0a77a"
Nov 24 13:32:34 crc kubenswrapper[5039]: I1124 13:32:34.852400 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb"
Nov 24 13:32:38 crc kubenswrapper[5039]: I1124 13:32:38.901686 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-55t2b"]
Nov 24 13:32:38 crc kubenswrapper[5039]: E1124 13:32:38.902257 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="175053d9-6995-4edc-9e0b-a72a0e10ae72" containerName="pull"
Nov 24 13:32:38 crc kubenswrapper[5039]: I1124 13:32:38.902274 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="175053d9-6995-4edc-9e0b-a72a0e10ae72" containerName="pull"
Nov 24 13:32:38 crc kubenswrapper[5039]: E1124 13:32:38.902291 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="175053d9-6995-4edc-9e0b-a72a0e10ae72" containerName="util"
Nov 24 13:32:38 crc kubenswrapper[5039]: I1124 13:32:38.902298 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="175053d9-6995-4edc-9e0b-a72a0e10ae72" containerName="util"
Nov 24 13:32:38 crc kubenswrapper[5039]: E1124 13:32:38.902307 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="175053d9-6995-4edc-9e0b-a72a0e10ae72" containerName="extract"
Nov 24 13:32:38 crc kubenswrapper[5039]: I1124 13:32:38.902316 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="175053d9-6995-4edc-9e0b-a72a0e10ae72" containerName="extract"
Nov 24 13:32:38 crc kubenswrapper[5039]: I1124 13:32:38.902451 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="175053d9-6995-4edc-9e0b-a72a0e10ae72" containerName="extract"
Nov 24 13:32:38 crc kubenswrapper[5039]: I1124 13:32:38.903031 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-55t2b"
Nov 24 13:32:38 crc kubenswrapper[5039]: I1124 13:32:38.905076 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt"
Nov 24 13:32:38 crc kubenswrapper[5039]: I1124 13:32:38.905294 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-hthkj"
Nov 24 13:32:38 crc kubenswrapper[5039]: I1124 13:32:38.905748 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt"
Nov 24 13:32:38 crc kubenswrapper[5039]: I1124 13:32:38.909906 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-55t2b"]
Nov 24 13:32:38 crc kubenswrapper[5039]: I1124 13:32:38.988658 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpw9k\" (UniqueName: \"kubernetes.io/projected/a78868c4-aedd-4fe3-a055-5460cac9f6c4-kube-api-access-vpw9k\") pod \"nmstate-operator-557fdffb88-55t2b\" (UID: \"a78868c4-aedd-4fe3-a055-5460cac9f6c4\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-55t2b"
Nov 24 13:32:39 crc kubenswrapper[5039]: I1124 13:32:39.089777 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpw9k\" (UniqueName: \"kubernetes.io/projected/a78868c4-aedd-4fe3-a055-5460cac9f6c4-kube-api-access-vpw9k\") pod \"nmstate-operator-557fdffb88-55t2b\" (UID: \"a78868c4-aedd-4fe3-a055-5460cac9f6c4\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-55t2b"
Nov 24 13:32:39 crc kubenswrapper[5039]: I1124 13:32:39.111396 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpw9k\" (UniqueName: \"kubernetes.io/projected/a78868c4-aedd-4fe3-a055-5460cac9f6c4-kube-api-access-vpw9k\") pod \"nmstate-operator-557fdffb88-55t2b\" (UID: \"a78868c4-aedd-4fe3-a055-5460cac9f6c4\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-55t2b"
Nov 24 13:32:39 crc kubenswrapper[5039]: I1124 13:32:39.220451 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-55t2b"
Nov 24 13:32:39 crc kubenswrapper[5039]: I1124 13:32:39.492185 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-55t2b"]
Nov 24 13:32:39 crc kubenswrapper[5039]: I1124 13:32:39.883983 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-55t2b" event={"ID":"a78868c4-aedd-4fe3-a055-5460cac9f6c4","Type":"ContainerStarted","Data":"bfe88129a2a9bf403b35b57742abed4f0347efc2be94a808d91a1d3eb10ffe05"}
Nov 24 13:32:42 crc kubenswrapper[5039]: I1124 13:32:42.903362 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-55t2b" event={"ID":"a78868c4-aedd-4fe3-a055-5460cac9f6c4","Type":"ContainerStarted","Data":"a5df977a06aaa0ec6ad65b229b0f925c672f94759bd22ecadc84fc8fa5e195bb"}
Nov 24 13:32:42 crc kubenswrapper[5039]: I1124 13:32:42.924673 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-55t2b" podStartSLOduration=2.4577414 podStartE2EDuration="4.924653089s" podCreationTimestamp="2025-11-24 13:32:38 +0000 UTC" firstStartedPulling="2025-11-24 13:32:39.508092972 +0000 UTC m=+871.947217472" lastFinishedPulling="2025-11-24 13:32:41.975004661 +0000 UTC m=+874.414129161" observedRunningTime="2025-11-24 13:32:42.920851747 +0000 UTC m=+875.359976247" watchObservedRunningTime="2025-11-24 13:32:42.924653089 +0000 UTC m=+875.363777589"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.242055 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-fnjvc"]
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.244623 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fnjvc"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.246387 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-8wrst"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.247350 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-j8g7c"]
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.248337 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-j8g7c"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.251638 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.257681 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-fnjvc"]
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.262865 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-ptxfm"]
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.263843 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-ptxfm"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.276851 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-j8g7c"]
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.329800 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e0e36cbb-009a-4784-a04d-95badbce22d0-dbus-socket\") pod \"nmstate-handler-ptxfm\" (UID: \"e0e36cbb-009a-4784-a04d-95badbce22d0\") " pod="openshift-nmstate/nmstate-handler-ptxfm"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.329853 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e0e36cbb-009a-4784-a04d-95badbce22d0-nmstate-lock\") pod \"nmstate-handler-ptxfm\" (UID: \"e0e36cbb-009a-4784-a04d-95badbce22d0\") " pod="openshift-nmstate/nmstate-handler-ptxfm"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.329885 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kz6c\" (UniqueName: \"kubernetes.io/projected/e0e36cbb-009a-4784-a04d-95badbce22d0-kube-api-access-4kz6c\") pod \"nmstate-handler-ptxfm\" (UID: \"e0e36cbb-009a-4784-a04d-95badbce22d0\") " pod="openshift-nmstate/nmstate-handler-ptxfm"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.330022 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6d5b\" (UniqueName: \"kubernetes.io/projected/667e819e-83fc-453d-90d3-7b89b63e15a4-kube-api-access-l6d5b\") pod \"nmstate-webhook-6b89b748d8-j8g7c\" (UID: \"667e819e-83fc-453d-90d3-7b89b63e15a4\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-j8g7c"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.330082 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/667e819e-83fc-453d-90d3-7b89b63e15a4-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-j8g7c\" (UID: \"667e819e-83fc-453d-90d3-7b89b63e15a4\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-j8g7c"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.330268 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klmqp\" (UniqueName: \"kubernetes.io/projected/f006222b-71be-4a99-9b20-e048040bd042-kube-api-access-klmqp\") pod \"nmstate-metrics-5dcf9c57c5-fnjvc\" (UID: \"f006222b-71be-4a99-9b20-e048040bd042\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fnjvc"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.330359 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e0e36cbb-009a-4784-a04d-95badbce22d0-ovs-socket\") pod \"nmstate-handler-ptxfm\" (UID: \"e0e36cbb-009a-4784-a04d-95badbce22d0\") " pod="openshift-nmstate/nmstate-handler-ptxfm"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.358040 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9dqz9"]
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.359220 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9dqz9"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.361028 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-txr5d"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.362596 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.362731 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.371853 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9dqz9"]
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.432318 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klmqp\" (UniqueName: \"kubernetes.io/projected/f006222b-71be-4a99-9b20-e048040bd042-kube-api-access-klmqp\") pod \"nmstate-metrics-5dcf9c57c5-fnjvc\" (UID: \"f006222b-71be-4a99-9b20-e048040bd042\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fnjvc"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.432367 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/1eee0206-49ea-45f5-8c34-547075ba3c65-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-9dqz9\" (UID: \"1eee0206-49ea-45f5-8c34-547075ba3c65\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9dqz9"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.432400 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e0e36cbb-009a-4784-a04d-95badbce22d0-ovs-socket\") pod \"nmstate-handler-ptxfm\" (UID: \"e0e36cbb-009a-4784-a04d-95badbce22d0\") " pod="openshift-nmstate/nmstate-handler-ptxfm"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.432443 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcjkt\" (UniqueName: \"kubernetes.io/projected/1eee0206-49ea-45f5-8c34-547075ba3c65-kube-api-access-bcjkt\") pod \"nmstate-console-plugin-5874bd7bc5-9dqz9\" (UID: \"1eee0206-49ea-45f5-8c34-547075ba3c65\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9dqz9"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.432567 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e0e36cbb-009a-4784-a04d-95badbce22d0-dbus-socket\") pod \"nmstate-handler-ptxfm\" (UID: \"e0e36cbb-009a-4784-a04d-95badbce22d0\") " pod="openshift-nmstate/nmstate-handler-ptxfm"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.432624 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e0e36cbb-009a-4784-a04d-95badbce22d0-nmstate-lock\") pod \"nmstate-handler-ptxfm\" (UID: \"e0e36cbb-009a-4784-a04d-95badbce22d0\") " pod="openshift-nmstate/nmstate-handler-ptxfm"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.432647 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e0e36cbb-009a-4784-a04d-95badbce22d0-ovs-socket\") pod \"nmstate-handler-ptxfm\" (UID: \"e0e36cbb-009a-4784-a04d-95badbce22d0\") " pod="openshift-nmstate/nmstate-handler-ptxfm"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.432661 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kz6c\" (UniqueName: \"kubernetes.io/projected/e0e36cbb-009a-4784-a04d-95badbce22d0-kube-api-access-4kz6c\") pod \"nmstate-handler-ptxfm\" (UID: \"e0e36cbb-009a-4784-a04d-95badbce22d0\") " pod="openshift-nmstate/nmstate-handler-ptxfm"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.432783 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6d5b\" (UniqueName: \"kubernetes.io/projected/667e819e-83fc-453d-90d3-7b89b63e15a4-kube-api-access-l6d5b\") pod \"nmstate-webhook-6b89b748d8-j8g7c\" (UID: \"667e819e-83fc-453d-90d3-7b89b63e15a4\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-j8g7c"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.432821 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e0e36cbb-009a-4784-a04d-95badbce22d0-nmstate-lock\") pod \"nmstate-handler-ptxfm\" (UID: \"e0e36cbb-009a-4784-a04d-95badbce22d0\") " pod="openshift-nmstate/nmstate-handler-ptxfm"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.432866 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/1eee0206-49ea-45f5-8c34-547075ba3c65-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-9dqz9\" (UID: \"1eee0206-49ea-45f5-8c34-547075ba3c65\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9dqz9"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.432890 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/667e819e-83fc-453d-90d3-7b89b63e15a4-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-j8g7c\" (UID: \"667e819e-83fc-453d-90d3-7b89b63e15a4\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-j8g7c"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.432947 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e0e36cbb-009a-4784-a04d-95badbce22d0-dbus-socket\") pod \"nmstate-handler-ptxfm\" (UID: \"e0e36cbb-009a-4784-a04d-95badbce22d0\") " pod="openshift-nmstate/nmstate-handler-ptxfm"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.439304 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/667e819e-83fc-453d-90d3-7b89b63e15a4-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-j8g7c\" (UID: \"667e819e-83fc-453d-90d3-7b89b63e15a4\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-j8g7c"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.446465 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-klmqp\" (UniqueName: \"kubernetes.io/projected/f006222b-71be-4a99-9b20-e048040bd042-kube-api-access-klmqp\") pod \"nmstate-metrics-5dcf9c57c5-fnjvc\" (UID: \"f006222b-71be-4a99-9b20-e048040bd042\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fnjvc"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.449738 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kz6c\" (UniqueName: \"kubernetes.io/projected/e0e36cbb-009a-4784-a04d-95badbce22d0-kube-api-access-4kz6c\") pod \"nmstate-handler-ptxfm\" (UID: \"e0e36cbb-009a-4784-a04d-95badbce22d0\") " pod="openshift-nmstate/nmstate-handler-ptxfm"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.451625 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6d5b\" (UniqueName: \"kubernetes.io/projected/667e819e-83fc-453d-90d3-7b89b63e15a4-kube-api-access-l6d5b\") pod \"nmstate-webhook-6b89b748d8-j8g7c\" (UID: \"667e819e-83fc-453d-90d3-7b89b63e15a4\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-j8g7c"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.534074 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/1eee0206-49ea-45f5-8c34-547075ba3c65-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-9dqz9\" (UID: \"1eee0206-49ea-45f5-8c34-547075ba3c65\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9dqz9"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.534158 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcjkt\" (UniqueName: \"kubernetes.io/projected/1eee0206-49ea-45f5-8c34-547075ba3c65-kube-api-access-bcjkt\") pod \"nmstate-console-plugin-5874bd7bc5-9dqz9\" (UID: \"1eee0206-49ea-45f5-8c34-547075ba3c65\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9dqz9"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.534216 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/1eee0206-49ea-45f5-8c34-547075ba3c65-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-9dqz9\" (UID: \"1eee0206-49ea-45f5-8c34-547075ba3c65\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9dqz9"
Nov 24 13:32:48 crc kubenswrapper[5039]: E1124 13:32:48.534251 5039 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found
Nov 24 13:32:48 crc kubenswrapper[5039]: E1124 13:32:48.534316 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1eee0206-49ea-45f5-8c34-547075ba3c65-plugin-serving-cert podName:1eee0206-49ea-45f5-8c34-547075ba3c65 nodeName:}" failed. No retries permitted until 2025-11-24 13:32:49.03429541 +0000 UTC m=+881.473419910 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/1eee0206-49ea-45f5-8c34-547075ba3c65-plugin-serving-cert") pod "nmstate-console-plugin-5874bd7bc5-9dqz9" (UID: "1eee0206-49ea-45f5-8c34-547075ba3c65") : secret "plugin-serving-cert" not found
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.535156 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/1eee0206-49ea-45f5-8c34-547075ba3c65-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-9dqz9\" (UID: \"1eee0206-49ea-45f5-8c34-547075ba3c65\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9dqz9"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.553649 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-5748778ffb-d9m2t"]
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.554495 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.563489 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fnjvc"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.571534 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5748778ffb-d9m2t"]
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.572072 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcjkt\" (UniqueName: \"kubernetes.io/projected/1eee0206-49ea-45f5-8c34-547075ba3c65-kube-api-access-bcjkt\") pod \"nmstate-console-plugin-5874bd7bc5-9dqz9\" (UID: \"1eee0206-49ea-45f5-8c34-547075ba3c65\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9dqz9"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.584022 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-j8g7c"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.597366 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-ptxfm"
Nov 24 13:32:48 crc kubenswrapper[5039]: W1124 13:32:48.632021 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode0e36cbb_009a_4784_a04d_95badbce22d0.slice/crio-e1dc1e4d0ee3eb9e9b661a8a95cae6135c9f0e3c690b0029b3ad2a4e786dbcb8 WatchSource:0}: Error finding container e1dc1e4d0ee3eb9e9b661a8a95cae6135c9f0e3c690b0029b3ad2a4e786dbcb8: Status 404 returned error can't find the container with id e1dc1e4d0ee3eb9e9b661a8a95cae6135c9f0e3c690b0029b3ad2a4e786dbcb8
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.635037 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnvc6\" (UniqueName: \"kubernetes.io/projected/a1d34075-ef09-4cf1-8c85-3875cac010ea-kube-api-access-gnvc6\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.635079 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-trusted-ca-bundle\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.635124 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-service-ca\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.635162 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-console-config\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.635189 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a1d34075-ef09-4cf1-8c85-3875cac010ea-console-serving-cert\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.635220 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-oauth-serving-cert\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.635267 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a1d34075-ef09-4cf1-8c85-3875cac010ea-console-oauth-config\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.736535 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-service-ca\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.736592 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-console-config\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.736619 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a1d34075-ef09-4cf1-8c85-3875cac010ea-console-serving-cert\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.736648 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-oauth-serving-cert\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.736667 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a1d34075-ef09-4cf1-8c85-3875cac010ea-console-oauth-config\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.736693 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnvc6\" (UniqueName: \"kubernetes.io/projected/a1d34075-ef09-4cf1-8c85-3875cac010ea-kube-api-access-gnvc6\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.736721 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-trusted-ca-bundle\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.737498 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-service-ca\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.738173 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-oauth-serving-cert\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.738683 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-trusted-ca-bundle\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.738715 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-console-config\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.744322 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a1d34075-ef09-4cf1-8c85-3875cac010ea-console-serving-cert\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.751226 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a1d34075-ef09-4cf1-8c85-3875cac010ea-console-oauth-config\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.754492 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnvc6\" (UniqueName: \"kubernetes.io/projected/a1d34075-ef09-4cf1-8c85-3875cac010ea-kube-api-access-gnvc6\") pod \"console-5748778ffb-d9m2t\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.872863 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:48 crc kubenswrapper[5039]: I1124 13:32:48.961154 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-ptxfm" event={"ID":"e0e36cbb-009a-4784-a04d-95badbce22d0","Type":"ContainerStarted","Data":"e1dc1e4d0ee3eb9e9b661a8a95cae6135c9f0e3c690b0029b3ad2a4e786dbcb8"}
Nov 24 13:32:49 crc kubenswrapper[5039]: I1124 13:32:49.025019 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-fnjvc"]
Nov 24 13:32:49 crc kubenswrapper[5039]: W1124 13:32:49.033675 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf006222b_71be_4a99_9b20_e048040bd042.slice/crio-c8a8823ceaac91e23d7c58869b1a00622f286489930939824dca5c9c72b8cc7b WatchSource:0}: Error finding container c8a8823ceaac91e23d7c58869b1a00622f286489930939824dca5c9c72b8cc7b: Status 404 returned error can't find the container with id c8a8823ceaac91e23d7c58869b1a00622f286489930939824dca5c9c72b8cc7b
Nov 24 13:32:49 crc kubenswrapper[5039]: I1124 13:32:49.041128 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/1eee0206-49ea-45f5-8c34-547075ba3c65-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-9dqz9\" (UID: \"1eee0206-49ea-45f5-8c34-547075ba3c65\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9dqz9"
Nov 24 13:32:49 crc kubenswrapper[5039]: I1124 13:32:49.045358 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/1eee0206-49ea-45f5-8c34-547075ba3c65-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-9dqz9\" (UID: \"1eee0206-49ea-45f5-8c34-547075ba3c65\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9dqz9"
Nov 24 13:32:49 crc kubenswrapper[5039]: I1124 13:32:49.072810 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-j8g7c"]
Nov 24 13:32:49 crc kubenswrapper[5039]: W1124 13:32:49.077593 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod667e819e_83fc_453d_90d3_7b89b63e15a4.slice/crio-e4d3582ce7c6ad9e3a3d0dd1399f9197e03768502589390291da26d272b63281 WatchSource:0}: Error finding container e4d3582ce7c6ad9e3a3d0dd1399f9197e03768502589390291da26d272b63281: Status 404 returned error can't find the container with id e4d3582ce7c6ad9e3a3d0dd1399f9197e03768502589390291da26d272b63281
Nov 24 13:32:49 crc kubenswrapper[5039]: I1124 13:32:49.272665 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5748778ffb-d9m2t"]
Nov 24 13:32:49 crc kubenswrapper[5039]: I1124 13:32:49.277671 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9dqz9"
Nov 24 13:32:49 crc kubenswrapper[5039]: W1124 13:32:49.284542 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda1d34075_ef09_4cf1_8c85_3875cac010ea.slice/crio-94c64109a5ba71a2fd1f8fe83c34067087ba6d08eb9cd8d20e439ba10c77f5a5 WatchSource:0}: Error finding container 94c64109a5ba71a2fd1f8fe83c34067087ba6d08eb9cd8d20e439ba10c77f5a5: Status 404 returned error can't find the container with id 94c64109a5ba71a2fd1f8fe83c34067087ba6d08eb9cd8d20e439ba10c77f5a5
Nov 24 13:32:49 crc kubenswrapper[5039]: I1124 13:32:49.826036 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9dqz9"]
Nov 24 13:32:49 crc kubenswrapper[5039]: I1124 13:32:49.969378 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fnjvc" event={"ID":"f006222b-71be-4a99-9b20-e048040bd042","Type":"ContainerStarted","Data":"c8a8823ceaac91e23d7c58869b1a00622f286489930939824dca5c9c72b8cc7b"}
Nov 24 13:32:49 crc kubenswrapper[5039]: I1124 13:32:49.970560 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9dqz9" event={"ID":"1eee0206-49ea-45f5-8c34-547075ba3c65","Type":"ContainerStarted","Data":"65810d9a547487ec8a1c06040caf08b96c7bad7d88139af86416510e5d6e4df8"}
Nov 24 13:32:49 crc kubenswrapper[5039]: I1124 13:32:49.972179 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5748778ffb-d9m2t" event={"ID":"a1d34075-ef09-4cf1-8c85-3875cac010ea","Type":"ContainerStarted","Data":"e67c0d158c8548889d2622364b83554c5fd42e6d215ae5dbc2639c933c8c5add"}
Nov 24 13:32:49 crc kubenswrapper[5039]: I1124 13:32:49.972208 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5748778ffb-d9m2t" event={"ID":"a1d34075-ef09-4cf1-8c85-3875cac010ea","Type":"ContainerStarted","Data":"94c64109a5ba71a2fd1f8fe83c34067087ba6d08eb9cd8d20e439ba10c77f5a5"}
Nov 24 13:32:49 crc kubenswrapper[5039]: I1124 13:32:49.974176 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-j8g7c" event={"ID":"667e819e-83fc-453d-90d3-7b89b63e15a4","Type":"ContainerStarted","Data":"e4d3582ce7c6ad9e3a3d0dd1399f9197e03768502589390291da26d272b63281"}
Nov 24 13:32:50 crc kubenswrapper[5039]: I1124 13:32:50.001113 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-5748778ffb-d9m2t" podStartSLOduration=2.001091071 podStartE2EDuration="2.001091071s" podCreationTimestamp="2025-11-24 13:32:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:32:49.994125342 +0000 UTC m=+882.433249862" watchObservedRunningTime="2025-11-24 13:32:50.001091071 +0000 UTC m=+882.440215581"
Nov 24 13:32:50 crc kubenswrapper[5039]: I1124 13:32:50.101442 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 13:32:50 crc kubenswrapper[5039]: I1124 13:32:50.101584 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 13:32:53 crc kubenswrapper[5039]: I1124 13:32:53.998596 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-j8g7c" event={"ID":"667e819e-83fc-453d-90d3-7b89b63e15a4","Type":"ContainerStarted","Data":"30c1b935a2cc2f2a498094736f5a51c21a8984d2d41e2c1baf59dbc6359799ab"}
Nov 24 13:32:53 crc kubenswrapper[5039]: I1124 13:32:53.999114 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-j8g7c"
Nov 24 13:32:53 crc kubenswrapper[5039]: I1124 13:32:53.999935 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fnjvc" event={"ID":"f006222b-71be-4a99-9b20-e048040bd042","Type":"ContainerStarted","Data":"cf4a47ab0a8123c233fe9831d63c5e9f7b4f903f1382c65f758b28175fd8f26a"}
Nov 24 13:32:54 crc kubenswrapper[5039]: I1124 13:32:54.001713 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9dqz9" event={"ID":"1eee0206-49ea-45f5-8c34-547075ba3c65","Type":"ContainerStarted","Data":"59a2d2c131c032d07c3409b61cbeb022a735a4fc5eeb07c25dd45390956c13b5"}
Nov 24 13:32:54 crc kubenswrapper[5039]: I1124 13:32:54.002968 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-ptxfm" event={"ID":"e0e36cbb-009a-4784-a04d-95badbce22d0","Type":"ContainerStarted","Data":"5a8ccf5eeaa654ba1cde876918c211dc97328c4387d2ff7a3b614f7c7d02db8d"}
Nov 24 13:32:54 crc kubenswrapper[5039]: I1124 13:32:54.003067 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-ptxfm"
Nov 24 13:32:54 crc kubenswrapper[5039]: I1124 13:32:54.015250 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-j8g7c" podStartSLOduration=1.805382955 podStartE2EDuration="6.015230087s" podCreationTimestamp="2025-11-24 13:32:48 +0000 UTC" firstStartedPulling="2025-11-24 13:32:49.081256725 +0000 UTC m=+881.520381225" lastFinishedPulling="2025-11-24 13:32:53.291103817 +0000 UTC m=+885.730228357" observedRunningTime="2025-11-24 13:32:54.012297987 +0000 UTC m=+886.451422487" watchObservedRunningTime="2025-11-24 13:32:54.015230087 +0000 UTC m=+886.454354587"
Nov 24 13:32:54 crc kubenswrapper[5039]: I1124 13:32:54.033634 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9dqz9" podStartSLOduration=2.582871459 podStartE2EDuration="6.033615702s" podCreationTimestamp="2025-11-24 13:32:48 +0000 UTC" firstStartedPulling="2025-11-24 13:32:49.838435976 +0000 UTC m=+882.277560476" lastFinishedPulling="2025-11-24 13:32:53.289180189 +0000 UTC m=+885.728304719" observedRunningTime="2025-11-24 13:32:54.031604924 +0000 UTC m=+886.470729434" watchObservedRunningTime="2025-11-24 13:32:54.033615702 +0000 UTC m=+886.472740202"
Nov 24 13:32:54 crc kubenswrapper[5039]: I1124 13:32:54.072817 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-ptxfm" podStartSLOduration=1.417004439 podStartE2EDuration="6.07279791s" podCreationTimestamp="2025-11-24 13:32:48 +0000 UTC" firstStartedPulling="2025-11-24 13:32:48.634359052 +0000 UTC m=+881.073483552" lastFinishedPulling="2025-11-24 13:32:53.290152493 +0000 UTC m=+885.729277023" observedRunningTime="2025-11-24 13:32:54.06945826 +0000 UTC m=+886.508582770" watchObservedRunningTime="2025-11-24 13:32:54.07279791 +0000 UTC m=+886.511922410"
Nov 24 13:32:57 crc kubenswrapper[5039]: I1124 13:32:57.023627 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fnjvc" event={"ID":"f006222b-71be-4a99-9b20-e048040bd042","Type":"ContainerStarted","Data":"d8ad940f28fd91122de4b2bdbd2e4416c5df46ee87358c6c01d2432c31ead11a"}
Nov 24 13:32:57 crc kubenswrapper[5039]: I1124 13:32:57.059749 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fnjvc" podStartSLOduration=1.316572378 podStartE2EDuration="9.059718581s" podCreationTimestamp="2025-11-24 13:32:48 +0000 UTC" firstStartedPulling="2025-11-24 13:32:49.037963637 +0000 UTC m=+881.477088137" lastFinishedPulling="2025-11-24 13:32:56.78110983 +0000 UTC m=+889.220234340" observedRunningTime="2025-11-24 13:32:57.051292788 +0000 UTC m=+889.490417288" watchObservedRunningTime="2025-11-24 13:32:57.059718581 +0000 UTC m=+889.498843111"
Nov 24 13:32:58 crc kubenswrapper[5039]: I1124 13:32:58.626720 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-ptxfm"
Nov 24 13:32:58 crc kubenswrapper[5039]: I1124 13:32:58.873036 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:58 crc kubenswrapper[5039]: I1124 13:32:58.873094 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:58 crc kubenswrapper[5039]: I1124 13:32:58.877229 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:59 crc kubenswrapper[5039]: I1124 13:32:59.040846 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-5748778ffb-d9m2t"
Nov 24 13:32:59 crc kubenswrapper[5039]: I1124 13:32:59.082387 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-zqgfl"]
Nov 24 13:33:08 crc kubenswrapper[5039]: I1124 13:33:08.594379 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-j8g7c"
Nov 24 13:33:20 crc kubenswrapper[5039]: I1124 13:33:20.101449 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 13:33:20 crc kubenswrapper[5039]: I1124 13:33:20.102070 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 13:33:23 crc kubenswrapper[5039]: I1124 13:33:23.535221 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk"]
Nov 24 13:33:23 crc kubenswrapper[5039]: I1124 13:33:23.538377 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk"
Nov 24 13:33:23 crc kubenswrapper[5039]: I1124 13:33:23.547853 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk\" (UID: \"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk"
Nov 24 13:33:23 crc kubenswrapper[5039]: I1124 13:33:23.547931 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5r6tq\" (UniqueName: \"kubernetes.io/projected/8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130-kube-api-access-5r6tq\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk\" (UID: \"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk"
Nov 24 13:33:23 crc kubenswrapper[5039]: I1124 13:33:23.548014 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk\" (UID: \"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk"
Nov 24 13:33:23 crc kubenswrapper[5039]: I1124 13:33:23.556919 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 24 13:33:23 crc kubenswrapper[5039]: I1124 13:33:23.558810 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk"]
Nov 24 13:33:23 crc kubenswrapper[5039]: I1124 13:33:23.649745 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5r6tq\" (UniqueName: \"kubernetes.io/projected/8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130-kube-api-access-5r6tq\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk\" (UID: \"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk"
Nov 24 13:33:23 crc kubenswrapper[5039]: I1124 13:33:23.649873 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk\" (UID: \"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk"
Nov 24 13:33:23 crc kubenswrapper[5039]: I1124 13:33:23.649946 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk\" (UID: \"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk"
Nov 24 13:33:23 crc kubenswrapper[5039]: I1124 13:33:23.650457 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk\" (UID: \"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk"
Nov 24 13:33:23 crc kubenswrapper[5039]: I1124 13:33:23.650524 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk\" (UID: \"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk"
Nov 24 13:33:23 crc kubenswrapper[5039]: I1124 13:33:23.675602 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5r6tq\" (UniqueName: \"kubernetes.io/projected/8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130-kube-api-access-5r6tq\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk\" (UID: \"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk"
Nov 24 13:33:23 crc kubenswrapper[5039]: I1124 13:33:23.858816 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk"
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.125183 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-zqgfl" podUID="663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2" containerName="console" containerID="cri-o://14e6cf73b8d10e0900286ffe41debff82c381e768307053485d0b5094ca2af67" gracePeriod=15
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.287773 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk"]
Nov 24 13:33:24 crc kubenswrapper[5039]: W1124 13:33:24.292591 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8a7b6ca0_68e4_44d2_a9b4_d0c4a9198130.slice/crio-d3132d8ccb23b9957d554ffc5046fbebcb38404c1c93f81739c9c4f05305fb8d WatchSource:0}: Error finding container d3132d8ccb23b9957d554ffc5046fbebcb38404c1c93f81739c9c4f05305fb8d: Status 404 returned error can't find the container with id d3132d8ccb23b9957d554ffc5046fbebcb38404c1c93f81739c9c4f05305fb8d
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.528886 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-zqgfl_663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2/console/0.log"
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.528948 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-zqgfl"
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.567120 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-console-config\") pod \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") "
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.567166 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-console-oauth-config\") pod \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") "
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.567239 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7tbf\" (UniqueName: \"kubernetes.io/projected/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-kube-api-access-v7tbf\") pod \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") "
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.567321 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-console-serving-cert\") pod \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") "
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.567372 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-oauth-serving-cert\") pod \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") "
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.567402 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-trusted-ca-bundle\") pod \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") "
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.567428 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-service-ca\") pod \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\" (UID: \"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2\") "
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.567939 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-console-config" (OuterVolumeSpecName: "console-config") pod "663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2" (UID: "663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.568285 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-service-ca" (OuterVolumeSpecName: "service-ca") pod "663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2" (UID: "663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.568628 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2" (UID: "663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.568809 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2" (UID: "663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.573074 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-kube-api-access-v7tbf" (OuterVolumeSpecName: "kube-api-access-v7tbf") pod "663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2" (UID: "663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2"). InnerVolumeSpecName "kube-api-access-v7tbf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.573163 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2" (UID: "663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.573624 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2" (UID: "663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.669444 5039 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-console-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.669490 5039 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.669517 5039 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.669527 5039 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-service-ca\") on node \"crc\" DevicePath \"\""
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.669539 5039 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-console-config\") on node \"crc\" DevicePath \"\""
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.669550 5039 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-console-oauth-config\") on node \"crc\" DevicePath \"\""
Nov 24 13:33:24 crc kubenswrapper[5039]: I1124 13:33:24.669562 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7tbf\" (UniqueName: \"kubernetes.io/projected/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2-kube-api-access-v7tbf\") on node \"crc\" DevicePath \"\""
Nov 24 13:33:25 crc kubenswrapper[5039]: I1124 13:33:25.229172 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-zqgfl_663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2/console/0.log"
Nov 24 13:33:25 crc kubenswrapper[5039]: I1124 13:33:25.229557 5039 generic.go:334] "Generic (PLEG): container finished" podID="663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2" containerID="14e6cf73b8d10e0900286ffe41debff82c381e768307053485d0b5094ca2af67" exitCode=2
Nov 24 13:33:25 crc kubenswrapper[5039]: I1124 13:33:25.229739 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-zqgfl"
Nov 24 13:33:25 crc kubenswrapper[5039]: I1124 13:33:25.229893 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-zqgfl" event={"ID":"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2","Type":"ContainerDied","Data":"14e6cf73b8d10e0900286ffe41debff82c381e768307053485d0b5094ca2af67"}
Nov 24 13:33:25 crc kubenswrapper[5039]: I1124 13:33:25.230435 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-zqgfl" event={"ID":"663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2","Type":"ContainerDied","Data":"1c41a032064e74af8db8cc0bb155a375301bfa9999813a444502e63d675c3992"}
Nov 24 13:33:25 crc kubenswrapper[5039]: I1124 13:33:25.230473 5039 scope.go:117] "RemoveContainer" containerID="14e6cf73b8d10e0900286ffe41debff82c381e768307053485d0b5094ca2af67"
Nov 24 13:33:25 crc kubenswrapper[5039]: I1124 13:33:25.232125 5039 generic.go:334] "Generic (PLEG): container finished" podID="8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130" containerID="4f2839ba436cd6fb6cecc587db69bb8ae79a86297de79ede8d5befa743122fa1" exitCode=0
Nov 24 13:33:25 crc kubenswrapper[5039]: I1124 13:33:25.232162 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk" event={"ID":"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130","Type":"ContainerDied","Data":"4f2839ba436cd6fb6cecc587db69bb8ae79a86297de79ede8d5befa743122fa1"}
Nov 24 13:33:25 crc kubenswrapper[5039]: I1124 13:33:25.232182 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk" event={"ID":"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130","Type":"ContainerStarted","Data":"d3132d8ccb23b9957d554ffc5046fbebcb38404c1c93f81739c9c4f05305fb8d"}
Nov 24 13:33:25 crc kubenswrapper[5039]: I1124 13:33:25.235437 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 24 13:33:25 crc kubenswrapper[5039]: I1124 13:33:25.251212 5039 scope.go:117] "RemoveContainer" containerID="14e6cf73b8d10e0900286ffe41debff82c381e768307053485d0b5094ca2af67"
Nov 24 13:33:25 crc kubenswrapper[5039]: E1124 13:33:25.251847 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14e6cf73b8d10e0900286ffe41debff82c381e768307053485d0b5094ca2af67\": container with ID starting with 14e6cf73b8d10e0900286ffe41debff82c381e768307053485d0b5094ca2af67 not found: ID does not exist" containerID="14e6cf73b8d10e0900286ffe41debff82c381e768307053485d0b5094ca2af67"
Nov 24 13:33:25 crc kubenswrapper[5039]: I1124 13:33:25.251887 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14e6cf73b8d10e0900286ffe41debff82c381e768307053485d0b5094ca2af67"} err="failed to get container status \"14e6cf73b8d10e0900286ffe41debff82c381e768307053485d0b5094ca2af67\": rpc error: code = NotFound desc = could not find container \"14e6cf73b8d10e0900286ffe41debff82c381e768307053485d0b5094ca2af67\": container with ID starting with 14e6cf73b8d10e0900286ffe41debff82c381e768307053485d0b5094ca2af67 not found: ID does not exist"
Nov 24 13:33:25 crc kubenswrapper[5039]: I1124 13:33:25.283895 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-zqgfl"]
Nov 24 13:33:25 crc kubenswrapper[5039]: I1124 13:33:25.289181 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api"
pods=["openshift-console/console-f9d7485db-zqgfl"] Nov 24 13:33:26 crc kubenswrapper[5039]: I1124 13:33:26.316648 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2" path="/var/lib/kubelet/pods/663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2/volumes" Nov 24 13:33:27 crc kubenswrapper[5039]: I1124 13:33:27.250779 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk" event={"ID":"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130","Type":"ContainerStarted","Data":"82aabced5d89d918f39baaf6e96b275ba6972a031326065b6354d399e4c170b5"} Nov 24 13:33:28 crc kubenswrapper[5039]: I1124 13:33:28.261371 5039 generic.go:334] "Generic (PLEG): container finished" podID="8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130" containerID="82aabced5d89d918f39baaf6e96b275ba6972a031326065b6354d399e4c170b5" exitCode=0 Nov 24 13:33:28 crc kubenswrapper[5039]: I1124 13:33:28.261480 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk" event={"ID":"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130","Type":"ContainerDied","Data":"82aabced5d89d918f39baaf6e96b275ba6972a031326065b6354d399e4c170b5"} Nov 24 13:33:29 crc kubenswrapper[5039]: I1124 13:33:29.270430 5039 generic.go:334] "Generic (PLEG): container finished" podID="8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130" containerID="578fa7a808007e260a36ff66da1111b92a9114c6095075d16ac3c0687ab80cfc" exitCode=0 Nov 24 13:33:29 crc kubenswrapper[5039]: I1124 13:33:29.270820 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk" event={"ID":"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130","Type":"ContainerDied","Data":"578fa7a808007e260a36ff66da1111b92a9114c6095075d16ac3c0687ab80cfc"} Nov 24 13:33:30 crc kubenswrapper[5039]: I1124 13:33:30.548459 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk" Nov 24 13:33:30 crc kubenswrapper[5039]: I1124 13:33:30.649070 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5r6tq\" (UniqueName: \"kubernetes.io/projected/8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130-kube-api-access-5r6tq\") pod \"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130\" (UID: \"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130\") " Nov 24 13:33:30 crc kubenswrapper[5039]: I1124 13:33:30.649188 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130-util\") pod \"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130\" (UID: \"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130\") " Nov 24 13:33:30 crc kubenswrapper[5039]: I1124 13:33:30.649255 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130-bundle\") pod \"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130\" (UID: \"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130\") " Nov 24 13:33:30 crc kubenswrapper[5039]: I1124 13:33:30.650395 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130-bundle" (OuterVolumeSpecName: "bundle") pod "8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130" (UID: "8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130"). InnerVolumeSpecName "bundle". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:33:30 crc kubenswrapper[5039]: I1124 13:33:30.656661 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130-kube-api-access-5r6tq" (OuterVolumeSpecName: "kube-api-access-5r6tq") pod "8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130" (UID: "8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130"). InnerVolumeSpecName "kube-api-access-5r6tq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:33:30 crc kubenswrapper[5039]: I1124 13:33:30.659681 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130-util" (OuterVolumeSpecName: "util") pod "8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130" (UID: "8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:33:30 crc kubenswrapper[5039]: I1124 13:33:30.750974 5039 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:33:30 crc kubenswrapper[5039]: I1124 13:33:30.751029 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5r6tq\" (UniqueName: \"kubernetes.io/projected/8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130-kube-api-access-5r6tq\") on node \"crc\" DevicePath \"\"" Nov 24 13:33:30 crc kubenswrapper[5039]: I1124 13:33:30.751042 5039 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130-util\") on node \"crc\" DevicePath \"\"" Nov 24 13:33:31 crc kubenswrapper[5039]: I1124 13:33:31.287180 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk" event={"ID":"8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130","Type":"ContainerDied","Data":"d3132d8ccb23b9957d554ffc5046fbebcb38404c1c93f81739c9c4f05305fb8d"} Nov 24 13:33:31 crc kubenswrapper[5039]: I1124 13:33:31.287806 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d3132d8ccb23b9957d554ffc5046fbebcb38404c1c93f81739c9c4f05305fb8d" Nov 24 13:33:31 crc kubenswrapper[5039]: I1124 13:33:31.287237 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.691261 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-859686c6ff-hskrv"] Nov 24 13:33:41 crc kubenswrapper[5039]: E1124 13:33:41.691876 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130" containerName="pull" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.691894 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130" containerName="pull" Nov 24 13:33:41 crc kubenswrapper[5039]: E1124 13:33:41.691909 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130" containerName="util" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.691917 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130" containerName="util" Nov 24 13:33:41 crc kubenswrapper[5039]: E1124 13:33:41.691931 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2" containerName="console" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.691939 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2" containerName="console" Nov 24 13:33:41 crc kubenswrapper[5039]: E1124 13:33:41.691954 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130" containerName="extract" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.691961 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130" containerName="extract" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.692117 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130" containerName="extract" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.692133 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="663c85c0-47d0-4d1f-a8fd-da3c94b1d2c2" containerName="console" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.692773 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-859686c6ff-hskrv" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.695521 5039 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-6gdzq" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.695533 5039 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.696430 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.696555 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.698192 5039 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.712725 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-859686c6ff-hskrv"] Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.798139 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m52t8\" (UniqueName: \"kubernetes.io/projected/4af724f9-39c3-414e-a020-29da6a5bfac7-kube-api-access-m52t8\") pod \"metallb-operator-controller-manager-859686c6ff-hskrv\" (UID: \"4af724f9-39c3-414e-a020-29da6a5bfac7\") " pod="metallb-system/metallb-operator-controller-manager-859686c6ff-hskrv" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.798219 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4af724f9-39c3-414e-a020-29da6a5bfac7-apiservice-cert\") pod \"metallb-operator-controller-manager-859686c6ff-hskrv\" (UID: \"4af724f9-39c3-414e-a020-29da6a5bfac7\") " pod="metallb-system/metallb-operator-controller-manager-859686c6ff-hskrv" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.798411 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4af724f9-39c3-414e-a020-29da6a5bfac7-webhook-cert\") pod \"metallb-operator-controller-manager-859686c6ff-hskrv\" (UID: \"4af724f9-39c3-414e-a020-29da6a5bfac7\") " pod="metallb-system/metallb-operator-controller-manager-859686c6ff-hskrv" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.900336 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4af724f9-39c3-414e-a020-29da6a5bfac7-webhook-cert\") pod \"metallb-operator-controller-manager-859686c6ff-hskrv\" (UID: \"4af724f9-39c3-414e-a020-29da6a5bfac7\") " pod="metallb-system/metallb-operator-controller-manager-859686c6ff-hskrv" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.900445 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m52t8\" (UniqueName: \"kubernetes.io/projected/4af724f9-39c3-414e-a020-29da6a5bfac7-kube-api-access-m52t8\") pod \"metallb-operator-controller-manager-859686c6ff-hskrv\" (UID: \"4af724f9-39c3-414e-a020-29da6a5bfac7\") " pod="metallb-system/metallb-operator-controller-manager-859686c6ff-hskrv" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.900496 5039 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4af724f9-39c3-414e-a020-29da6a5bfac7-apiservice-cert\") pod \"metallb-operator-controller-manager-859686c6ff-hskrv\" (UID: \"4af724f9-39c3-414e-a020-29da6a5bfac7\") " pod="metallb-system/metallb-operator-controller-manager-859686c6ff-hskrv" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.911471 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4af724f9-39c3-414e-a020-29da6a5bfac7-apiservice-cert\") pod \"metallb-operator-controller-manager-859686c6ff-hskrv\" (UID: \"4af724f9-39c3-414e-a020-29da6a5bfac7\") " pod="metallb-system/metallb-operator-controller-manager-859686c6ff-hskrv" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.911472 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4af724f9-39c3-414e-a020-29da6a5bfac7-webhook-cert\") pod \"metallb-operator-controller-manager-859686c6ff-hskrv\" (UID: \"4af724f9-39c3-414e-a020-29da6a5bfac7\") " pod="metallb-system/metallb-operator-controller-manager-859686c6ff-hskrv" Nov 24 13:33:41 crc kubenswrapper[5039]: I1124 13:33:41.922805 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m52t8\" (UniqueName: \"kubernetes.io/projected/4af724f9-39c3-414e-a020-29da6a5bfac7-kube-api-access-m52t8\") pod \"metallb-operator-controller-manager-859686c6ff-hskrv\" (UID: \"4af724f9-39c3-414e-a020-29da6a5bfac7\") " pod="metallb-system/metallb-operator-controller-manager-859686c6ff-hskrv" Nov 24 13:33:42 crc kubenswrapper[5039]: I1124 13:33:42.008547 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-76f55458ff-26z8q"] Nov 24 13:33:42 crc kubenswrapper[5039]: I1124 13:33:42.009602 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-76f55458ff-26z8q" Nov 24 13:33:42 crc kubenswrapper[5039]: I1124 13:33:42.011351 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-859686c6ff-hskrv" Nov 24 13:33:42 crc kubenswrapper[5039]: I1124 13:33:42.014780 5039 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 24 13:33:42 crc kubenswrapper[5039]: I1124 13:33:42.015093 5039 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 24 13:33:42 crc kubenswrapper[5039]: I1124 13:33:42.015877 5039 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-wj66g" Nov 24 13:33:42 crc kubenswrapper[5039]: I1124 13:33:42.026345 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-76f55458ff-26z8q"] Nov 24 13:33:42 crc kubenswrapper[5039]: I1124 13:33:42.103260 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/18cdfd31-117a-4b07-bdba-fc6703fcfa55-apiservice-cert\") pod \"metallb-operator-webhook-server-76f55458ff-26z8q\" (UID: \"18cdfd31-117a-4b07-bdba-fc6703fcfa55\") " pod="metallb-system/metallb-operator-webhook-server-76f55458ff-26z8q" Nov 24 13:33:42 crc kubenswrapper[5039]: I1124 13:33:42.103343 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/18cdfd31-117a-4b07-bdba-fc6703fcfa55-webhook-cert\") pod \"metallb-operator-webhook-server-76f55458ff-26z8q\" (UID: \"18cdfd31-117a-4b07-bdba-fc6703fcfa55\") " pod="metallb-system/metallb-operator-webhook-server-76f55458ff-26z8q" Nov 24 13:33:42 crc kubenswrapper[5039]: I1124 13:33:42.103401 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skjf2\" (UniqueName: \"kubernetes.io/projected/18cdfd31-117a-4b07-bdba-fc6703fcfa55-kube-api-access-skjf2\") pod \"metallb-operator-webhook-server-76f55458ff-26z8q\" (UID: \"18cdfd31-117a-4b07-bdba-fc6703fcfa55\") " pod="metallb-system/metallb-operator-webhook-server-76f55458ff-26z8q" Nov 24 13:33:42 crc kubenswrapper[5039]: I1124 13:33:42.205339 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/18cdfd31-117a-4b07-bdba-fc6703fcfa55-apiservice-cert\") pod \"metallb-operator-webhook-server-76f55458ff-26z8q\" (UID: \"18cdfd31-117a-4b07-bdba-fc6703fcfa55\") " pod="metallb-system/metallb-operator-webhook-server-76f55458ff-26z8q" Nov 24 13:33:42 crc kubenswrapper[5039]: I1124 13:33:42.205420 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/18cdfd31-117a-4b07-bdba-fc6703fcfa55-webhook-cert\") pod \"metallb-operator-webhook-server-76f55458ff-26z8q\" (UID: \"18cdfd31-117a-4b07-bdba-fc6703fcfa55\") " pod="metallb-system/metallb-operator-webhook-server-76f55458ff-26z8q" Nov 24 13:33:42 crc kubenswrapper[5039]: I1124 13:33:42.205456 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skjf2\" (UniqueName: \"kubernetes.io/projected/18cdfd31-117a-4b07-bdba-fc6703fcfa55-kube-api-access-skjf2\") pod \"metallb-operator-webhook-server-76f55458ff-26z8q\" (UID: \"18cdfd31-117a-4b07-bdba-fc6703fcfa55\") " pod="metallb-system/metallb-operator-webhook-server-76f55458ff-26z8q" Nov 24 13:33:42 crc kubenswrapper[5039]: I1124 
13:33:42.213445 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/18cdfd31-117a-4b07-bdba-fc6703fcfa55-webhook-cert\") pod \"metallb-operator-webhook-server-76f55458ff-26z8q\" (UID: \"18cdfd31-117a-4b07-bdba-fc6703fcfa55\") " pod="metallb-system/metallb-operator-webhook-server-76f55458ff-26z8q" Nov 24 13:33:42 crc kubenswrapper[5039]: I1124 13:33:42.222437 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/18cdfd31-117a-4b07-bdba-fc6703fcfa55-apiservice-cert\") pod \"metallb-operator-webhook-server-76f55458ff-26z8q\" (UID: \"18cdfd31-117a-4b07-bdba-fc6703fcfa55\") " pod="metallb-system/metallb-operator-webhook-server-76f55458ff-26z8q" Nov 24 13:33:42 crc kubenswrapper[5039]: I1124 13:33:42.234240 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skjf2\" (UniqueName: \"kubernetes.io/projected/18cdfd31-117a-4b07-bdba-fc6703fcfa55-kube-api-access-skjf2\") pod \"metallb-operator-webhook-server-76f55458ff-26z8q\" (UID: \"18cdfd31-117a-4b07-bdba-fc6703fcfa55\") " pod="metallb-system/metallb-operator-webhook-server-76f55458ff-26z8q" Nov 24 13:33:42 crc kubenswrapper[5039]: I1124 13:33:42.327358 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-76f55458ff-26z8q" Nov 24 13:33:42 crc kubenswrapper[5039]: I1124 13:33:42.539626 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-859686c6ff-hskrv"] Nov 24 13:33:42 crc kubenswrapper[5039]: I1124 13:33:42.761593 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-76f55458ff-26z8q"] Nov 24 13:33:42 crc kubenswrapper[5039]: W1124 13:33:42.767671 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18cdfd31_117a_4b07_bdba_fc6703fcfa55.slice/crio-61523cf2f702f4c2453fae5a7d5bb30ec5a586adc8deabc3c15058f38bed6b86 WatchSource:0}: Error finding container 61523cf2f702f4c2453fae5a7d5bb30ec5a586adc8deabc3c15058f38bed6b86: Status 404 returned error can't find the container with id 61523cf2f702f4c2453fae5a7d5bb30ec5a586adc8deabc3c15058f38bed6b86 Nov 24 13:33:43 crc kubenswrapper[5039]: I1124 13:33:43.372522 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-76f55458ff-26z8q" event={"ID":"18cdfd31-117a-4b07-bdba-fc6703fcfa55","Type":"ContainerStarted","Data":"61523cf2f702f4c2453fae5a7d5bb30ec5a586adc8deabc3c15058f38bed6b86"} Nov 24 13:33:43 crc kubenswrapper[5039]: I1124 13:33:43.373837 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-859686c6ff-hskrv" event={"ID":"4af724f9-39c3-414e-a020-29da6a5bfac7","Type":"ContainerStarted","Data":"e493c917385b7f84a77af0c0c82f24ec43d926fed893e8d74ebe6cff66561470"} Nov 24 13:33:48 crc kubenswrapper[5039]: I1124 13:33:48.411285 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-76f55458ff-26z8q" event={"ID":"18cdfd31-117a-4b07-bdba-fc6703fcfa55","Type":"ContainerStarted","Data":"effb7ffb0bcdcd4ca12b452dafa9531ae75f58c28a7fcb5e444073f6c5654519"} Nov 24 13:33:48 crc kubenswrapper[5039]: I1124 13:33:48.411787 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="metallb-system/metallb-operator-webhook-server-76f55458ff-26z8q" Nov 24 13:33:48 crc kubenswrapper[5039]: I1124 13:33:48.412482 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-859686c6ff-hskrv" event={"ID":"4af724f9-39c3-414e-a020-29da6a5bfac7","Type":"ContainerStarted","Data":"b2bd9ce0cf6d3404a7c92d8f537297e143218dcb2b99bdd4588857ceb59d9df5"} Nov 24 13:33:48 crc kubenswrapper[5039]: I1124 13:33:48.412860 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-859686c6ff-hskrv" Nov 24 13:33:48 crc kubenswrapper[5039]: I1124 13:33:48.431191 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-76f55458ff-26z8q" podStartSLOduration=2.080765519 podStartE2EDuration="7.431172241s" podCreationTimestamp="2025-11-24 13:33:41 +0000 UTC" firstStartedPulling="2025-11-24 13:33:42.77171393 +0000 UTC m=+935.210838430" lastFinishedPulling="2025-11-24 13:33:48.122120652 +0000 UTC m=+940.561245152" observedRunningTime="2025-11-24 13:33:48.426174941 +0000 UTC m=+940.865299441" watchObservedRunningTime="2025-11-24 13:33:48.431172241 +0000 UTC m=+940.870296731" Nov 24 13:33:48 crc kubenswrapper[5039]: I1124 13:33:48.447808 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-859686c6ff-hskrv" podStartSLOduration=2.173604959 podStartE2EDuration="7.447786632s" podCreationTimestamp="2025-11-24 13:33:41 +0000 UTC" firstStartedPulling="2025-11-24 13:33:42.55174617 +0000 UTC m=+934.990870670" lastFinishedPulling="2025-11-24 13:33:47.825927843 +0000 UTC m=+940.265052343" observedRunningTime="2025-11-24 13:33:48.44393178 +0000 UTC m=+940.883056280" watchObservedRunningTime="2025-11-24 13:33:48.447786632 +0000 UTC m=+940.886911132" Nov 24 13:33:50 crc kubenswrapper[5039]: I1124 13:33:50.102637 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:33:50 crc kubenswrapper[5039]: I1124 13:33:50.102709 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:33:50 crc kubenswrapper[5039]: I1124 13:33:50.102752 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:33:50 crc kubenswrapper[5039]: I1124 13:33:50.103364 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"066219b28d99610e4d1092b8b5a95d47b8b9f6102be58f3694f6d12e791d5f0b"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 13:33:50 crc kubenswrapper[5039]: I1124 13:33:50.103757 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" 
containerName="machine-config-daemon" containerID="cri-o://066219b28d99610e4d1092b8b5a95d47b8b9f6102be58f3694f6d12e791d5f0b" gracePeriod=600 Nov 24 13:33:50 crc kubenswrapper[5039]: I1124 13:33:50.428366 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="066219b28d99610e4d1092b8b5a95d47b8b9f6102be58f3694f6d12e791d5f0b" exitCode=0 Nov 24 13:33:50 crc kubenswrapper[5039]: I1124 13:33:50.428443 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"066219b28d99610e4d1092b8b5a95d47b8b9f6102be58f3694f6d12e791d5f0b"} Nov 24 13:33:50 crc kubenswrapper[5039]: I1124 13:33:50.428491 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"16f5b0fb44ff36ed732d98fa0d4391bb1a697e230891b1a79ab6e7366f72ba49"} Nov 24 13:33:50 crc kubenswrapper[5039]: I1124 13:33:50.428528 5039 scope.go:117] "RemoveContainer" containerID="ad5ffc63035c78c438991177870b3e0e28e428524aad180cebafc49a63fbdb72" Nov 24 13:34:02 crc kubenswrapper[5039]: I1124 13:34:02.348260 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-76f55458ff-26z8q" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.014143 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-859686c6ff-hskrv" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.888369 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-kn25p"] Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.890325 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-kn25p" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.893638 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-2zrlg"] Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.896346 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.897386 5039 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.897638 5039 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-x77dj" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.898052 5039 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.898122 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.906698 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-kn25p"] Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.970104 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/a6e7ae26-ae37-48c9-97ca-917a4e92a535-frr-conf\") pod \"frr-k8s-2zrlg\" (UID: \"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.970156 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/a6e7ae26-ae37-48c9-97ca-917a4e92a535-frr-sockets\") pod \"frr-k8s-2zrlg\" (UID: \"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.970188 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kv8s\" (UniqueName: \"kubernetes.io/projected/d341f082-ff80-43f0-aa5c-1476f8addb05-kube-api-access-2kv8s\") pod \"frr-k8s-webhook-server-6998585d5-kn25p\" (UID: \"d341f082-ff80-43f0-aa5c-1476f8addb05\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-kn25p" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.970266 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d341f082-ff80-43f0-aa5c-1476f8addb05-cert\") pod \"frr-k8s-webhook-server-6998585d5-kn25p\" (UID: \"d341f082-ff80-43f0-aa5c-1476f8addb05\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-kn25p" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.970319 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xfmd\" (UniqueName: \"kubernetes.io/projected/a6e7ae26-ae37-48c9-97ca-917a4e92a535-kube-api-access-7xfmd\") pod \"frr-k8s-2zrlg\" (UID: \"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.970346 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/a6e7ae26-ae37-48c9-97ca-917a4e92a535-metrics\") pod \"frr-k8s-2zrlg\" (UID: \"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.970408 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: 
\"kubernetes.io/empty-dir/a6e7ae26-ae37-48c9-97ca-917a4e92a535-reloader\") pod \"frr-k8s-2zrlg\" (UID: \"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.970460 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a6e7ae26-ae37-48c9-97ca-917a4e92a535-metrics-certs\") pod \"frr-k8s-2zrlg\" (UID: \"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.970641 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/a6e7ae26-ae37-48c9-97ca-917a4e92a535-frr-startup\") pod \"frr-k8s-2zrlg\" (UID: \"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.980658 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-7wr9z"] Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.981721 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-7wr9z" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.987693 5039 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.988692 5039 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-2k7tk" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.988790 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 24 13:34:22 crc kubenswrapper[5039]: I1124 13:34:22.990956 5039 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.009053 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-m58j2"] Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.010412 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-m58j2" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.015756 5039 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.030352 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-m58j2"] Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.072634 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/a6e7ae26-ae37-48c9-97ca-917a4e92a535-frr-startup\") pod \"frr-k8s-2zrlg\" (UID: \"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.072689 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dl7mc\" (UniqueName: \"kubernetes.io/projected/3e29f306-3558-4854-9ada-3ff94d2ad700-kube-api-access-dl7mc\") pod \"controller-6c7b4b5f48-m58j2\" (UID: \"3e29f306-3558-4854-9ada-3ff94d2ad700\") " pod="metallb-system/controller-6c7b4b5f48-m58j2" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.072717 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/a6e7ae26-ae37-48c9-97ca-917a4e92a535-frr-conf\") pod \"frr-k8s-2zrlg\" (UID: \"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.072740 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/a6e7ae26-ae37-48c9-97ca-917a4e92a535-frr-sockets\") pod \"frr-k8s-2zrlg\" (UID: \"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.072763 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kv8s\" (UniqueName: \"kubernetes.io/projected/d341f082-ff80-43f0-aa5c-1476f8addb05-kube-api-access-2kv8s\") pod \"frr-k8s-webhook-server-6998585d5-kn25p\" (UID: \"d341f082-ff80-43f0-aa5c-1476f8addb05\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-kn25p" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.072788 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3e29f306-3558-4854-9ada-3ff94d2ad700-metrics-certs\") pod \"controller-6c7b4b5f48-m58j2\" (UID: \"3e29f306-3558-4854-9ada-3ff94d2ad700\") " pod="metallb-system/controller-6c7b4b5f48-m58j2" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.072805 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d341f082-ff80-43f0-aa5c-1476f8addb05-cert\") pod \"frr-k8s-webhook-server-6998585d5-kn25p\" (UID: \"d341f082-ff80-43f0-aa5c-1476f8addb05\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-kn25p" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.072831 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xfmd\" (UniqueName: \"kubernetes.io/projected/a6e7ae26-ae37-48c9-97ca-917a4e92a535-kube-api-access-7xfmd\") pod \"frr-k8s-2zrlg\" (UID: \"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:23 crc kubenswrapper[5039]: 
I1124 13:34:23.072849 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/a6e7ae26-ae37-48c9-97ca-917a4e92a535-metrics\") pod \"frr-k8s-2zrlg\" (UID: \"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.072877 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mtxst\" (UniqueName: \"kubernetes.io/projected/fa0ca8a9-96d3-40dc-916f-97048b7112b0-kube-api-access-mtxst\") pod \"speaker-7wr9z\" (UID: \"fa0ca8a9-96d3-40dc-916f-97048b7112b0\") " pod="metallb-system/speaker-7wr9z" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.072902 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/fa0ca8a9-96d3-40dc-916f-97048b7112b0-metallb-excludel2\") pod \"speaker-7wr9z\" (UID: \"fa0ca8a9-96d3-40dc-916f-97048b7112b0\") " pod="metallb-system/speaker-7wr9z" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.072925 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/a6e7ae26-ae37-48c9-97ca-917a4e92a535-reloader\") pod \"frr-k8s-2zrlg\" (UID: \"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.072948 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fa0ca8a9-96d3-40dc-916f-97048b7112b0-metrics-certs\") pod \"speaker-7wr9z\" (UID: \"fa0ca8a9-96d3-40dc-916f-97048b7112b0\") " pod="metallb-system/speaker-7wr9z" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.072973 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a6e7ae26-ae37-48c9-97ca-917a4e92a535-metrics-certs\") pod \"frr-k8s-2zrlg\" (UID: \"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.072989 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/fa0ca8a9-96d3-40dc-916f-97048b7112b0-memberlist\") pod \"speaker-7wr9z\" (UID: \"fa0ca8a9-96d3-40dc-916f-97048b7112b0\") " pod="metallb-system/speaker-7wr9z" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.073011 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3e29f306-3558-4854-9ada-3ff94d2ad700-cert\") pod \"controller-6c7b4b5f48-m58j2\" (UID: \"3e29f306-3558-4854-9ada-3ff94d2ad700\") " pod="metallb-system/controller-6c7b4b5f48-m58j2" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.073102 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/a6e7ae26-ae37-48c9-97ca-917a4e92a535-frr-conf\") pod \"frr-k8s-2zrlg\" (UID: \"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.073283 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/a6e7ae26-ae37-48c9-97ca-917a4e92a535-metrics\") pod \"frr-k8s-2zrlg\" (UID: 
\"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.073334 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/a6e7ae26-ae37-48c9-97ca-917a4e92a535-reloader\") pod \"frr-k8s-2zrlg\" (UID: \"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.073580 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/a6e7ae26-ae37-48c9-97ca-917a4e92a535-frr-sockets\") pod \"frr-k8s-2zrlg\" (UID: \"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:23 crc kubenswrapper[5039]: E1124 13:34:23.073655 5039 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Nov 24 13:34:23 crc kubenswrapper[5039]: E1124 13:34:23.073702 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d341f082-ff80-43f0-aa5c-1476f8addb05-cert podName:d341f082-ff80-43f0-aa5c-1476f8addb05 nodeName:}" failed. No retries permitted until 2025-11-24 13:34:23.573683931 +0000 UTC m=+976.012808431 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d341f082-ff80-43f0-aa5c-1476f8addb05-cert") pod "frr-k8s-webhook-server-6998585d5-kn25p" (UID: "d341f082-ff80-43f0-aa5c-1476f8addb05") : secret "frr-k8s-webhook-server-cert" not found Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.073756 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/a6e7ae26-ae37-48c9-97ca-917a4e92a535-frr-startup\") pod \"frr-k8s-2zrlg\" (UID: \"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.079278 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a6e7ae26-ae37-48c9-97ca-917a4e92a535-metrics-certs\") pod \"frr-k8s-2zrlg\" (UID: \"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.095358 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kv8s\" (UniqueName: \"kubernetes.io/projected/d341f082-ff80-43f0-aa5c-1476f8addb05-kube-api-access-2kv8s\") pod \"frr-k8s-webhook-server-6998585d5-kn25p\" (UID: \"d341f082-ff80-43f0-aa5c-1476f8addb05\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-kn25p" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.099455 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xfmd\" (UniqueName: \"kubernetes.io/projected/a6e7ae26-ae37-48c9-97ca-917a4e92a535-kube-api-access-7xfmd\") pod \"frr-k8s-2zrlg\" (UID: \"a6e7ae26-ae37-48c9-97ca-917a4e92a535\") " pod="metallb-system/frr-k8s-2zrlg" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.174927 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mtxst\" (UniqueName: \"kubernetes.io/projected/fa0ca8a9-96d3-40dc-916f-97048b7112b0-kube-api-access-mtxst\") pod \"speaker-7wr9z\" (UID: \"fa0ca8a9-96d3-40dc-916f-97048b7112b0\") " pod="metallb-system/speaker-7wr9z" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.175008 5039 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/fa0ca8a9-96d3-40dc-916f-97048b7112b0-metallb-excludel2\") pod \"speaker-7wr9z\" (UID: \"fa0ca8a9-96d3-40dc-916f-97048b7112b0\") " pod="metallb-system/speaker-7wr9z" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.175058 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fa0ca8a9-96d3-40dc-916f-97048b7112b0-metrics-certs\") pod \"speaker-7wr9z\" (UID: \"fa0ca8a9-96d3-40dc-916f-97048b7112b0\") " pod="metallb-system/speaker-7wr9z" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.175095 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/fa0ca8a9-96d3-40dc-916f-97048b7112b0-memberlist\") pod \"speaker-7wr9z\" (UID: \"fa0ca8a9-96d3-40dc-916f-97048b7112b0\") " pod="metallb-system/speaker-7wr9z" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.175142 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3e29f306-3558-4854-9ada-3ff94d2ad700-cert\") pod \"controller-6c7b4b5f48-m58j2\" (UID: \"3e29f306-3558-4854-9ada-3ff94d2ad700\") " pod="metallb-system/controller-6c7b4b5f48-m58j2" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.175197 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dl7mc\" (UniqueName: \"kubernetes.io/projected/3e29f306-3558-4854-9ada-3ff94d2ad700-kube-api-access-dl7mc\") pod \"controller-6c7b4b5f48-m58j2\" (UID: \"3e29f306-3558-4854-9ada-3ff94d2ad700\") " pod="metallb-system/controller-6c7b4b5f48-m58j2" Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.175239 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3e29f306-3558-4854-9ada-3ff94d2ad700-metrics-certs\") pod \"controller-6c7b4b5f48-m58j2\" (UID: \"3e29f306-3558-4854-9ada-3ff94d2ad700\") " pod="metallb-system/controller-6c7b4b5f48-m58j2" Nov 24 13:34:23 crc kubenswrapper[5039]: E1124 13:34:23.175412 5039 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Nov 24 13:34:23 crc kubenswrapper[5039]: E1124 13:34:23.175461 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3e29f306-3558-4854-9ada-3ff94d2ad700-metrics-certs podName:3e29f306-3558-4854-9ada-3ff94d2ad700 nodeName:}" failed. No retries permitted until 2025-11-24 13:34:23.675446256 +0000 UTC m=+976.114570756 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3e29f306-3558-4854-9ada-3ff94d2ad700-metrics-certs") pod "controller-6c7b4b5f48-m58j2" (UID: "3e29f306-3558-4854-9ada-3ff94d2ad700") : secret "controller-certs-secret" not found Nov 24 13:34:23 crc kubenswrapper[5039]: E1124 13:34:23.175943 5039 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 24 13:34:23 crc kubenswrapper[5039]: E1124 13:34:23.176133 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fa0ca8a9-96d3-40dc-916f-97048b7112b0-memberlist podName:fa0ca8a9-96d3-40dc-916f-97048b7112b0 nodeName:}" failed. 
No retries permitted until 2025-11-24 13:34:23.676104333 +0000 UTC m=+976.115228833 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/fa0ca8a9-96d3-40dc-916f-97048b7112b0-memberlist") pod "speaker-7wr9z" (UID: "fa0ca8a9-96d3-40dc-916f-97048b7112b0") : secret "metallb-memberlist" not found
Nov 24 13:34:23 crc kubenswrapper[5039]: E1124 13:34:23.175984 5039 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found
Nov 24 13:34:23 crc kubenswrapper[5039]: E1124 13:34:23.176294 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fa0ca8a9-96d3-40dc-916f-97048b7112b0-metrics-certs podName:fa0ca8a9-96d3-40dc-916f-97048b7112b0 nodeName:}" failed. No retries permitted until 2025-11-24 13:34:23.676286257 +0000 UTC m=+976.115410757 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fa0ca8a9-96d3-40dc-916f-97048b7112b0-metrics-certs") pod "speaker-7wr9z" (UID: "fa0ca8a9-96d3-40dc-916f-97048b7112b0") : secret "speaker-certs-secret" not found
Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.176473 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/fa0ca8a9-96d3-40dc-916f-97048b7112b0-metallb-excludel2\") pod \"speaker-7wr9z\" (UID: \"fa0ca8a9-96d3-40dc-916f-97048b7112b0\") " pod="metallb-system/speaker-7wr9z"
Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.177959 5039 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.189399 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3e29f306-3558-4854-9ada-3ff94d2ad700-cert\") pod \"controller-6c7b4b5f48-m58j2\" (UID: \"3e29f306-3558-4854-9ada-3ff94d2ad700\") " pod="metallb-system/controller-6c7b4b5f48-m58j2"
Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.195013 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mtxst\" (UniqueName: \"kubernetes.io/projected/fa0ca8a9-96d3-40dc-916f-97048b7112b0-kube-api-access-mtxst\") pod \"speaker-7wr9z\" (UID: \"fa0ca8a9-96d3-40dc-916f-97048b7112b0\") " pod="metallb-system/speaker-7wr9z"
Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.195619 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dl7mc\" (UniqueName: \"kubernetes.io/projected/3e29f306-3558-4854-9ada-3ff94d2ad700-kube-api-access-dl7mc\") pod \"controller-6c7b4b5f48-m58j2\" (UID: \"3e29f306-3558-4854-9ada-3ff94d2ad700\") " pod="metallb-system/controller-6c7b4b5f48-m58j2"
Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.219304 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-2zrlg"
Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.581534 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d341f082-ff80-43f0-aa5c-1476f8addb05-cert\") pod \"frr-k8s-webhook-server-6998585d5-kn25p\" (UID: \"d341f082-ff80-43f0-aa5c-1476f8addb05\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-kn25p"
Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.587897 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d341f082-ff80-43f0-aa5c-1476f8addb05-cert\") pod \"frr-k8s-webhook-server-6998585d5-kn25p\" (UID: \"d341f082-ff80-43f0-aa5c-1476f8addb05\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-kn25p"
Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.666842 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-2zrlg" event={"ID":"a6e7ae26-ae37-48c9-97ca-917a4e92a535","Type":"ContainerStarted","Data":"26145eda9c309fe1f6ee6263747058f63fb41ed14de675273b36b40546683b40"}
Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.682294 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fa0ca8a9-96d3-40dc-916f-97048b7112b0-metrics-certs\") pod \"speaker-7wr9z\" (UID: \"fa0ca8a9-96d3-40dc-916f-97048b7112b0\") " pod="metallb-system/speaker-7wr9z"
Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.682344 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/fa0ca8a9-96d3-40dc-916f-97048b7112b0-memberlist\") pod \"speaker-7wr9z\" (UID: \"fa0ca8a9-96d3-40dc-916f-97048b7112b0\") " pod="metallb-system/speaker-7wr9z"
Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.682420 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3e29f306-3558-4854-9ada-3ff94d2ad700-metrics-certs\") pod \"controller-6c7b4b5f48-m58j2\" (UID: \"3e29f306-3558-4854-9ada-3ff94d2ad700\") " pod="metallb-system/controller-6c7b4b5f48-m58j2"
Nov 24 13:34:23 crc kubenswrapper[5039]: E1124 13:34:23.682814 5039 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Nov 24 13:34:23 crc kubenswrapper[5039]: E1124 13:34:23.682900 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fa0ca8a9-96d3-40dc-916f-97048b7112b0-memberlist podName:fa0ca8a9-96d3-40dc-916f-97048b7112b0 nodeName:}" failed. No retries permitted until 2025-11-24 13:34:24.682877445 +0000 UTC m=+977.122001965 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/fa0ca8a9-96d3-40dc-916f-97048b7112b0-memberlist") pod "speaker-7wr9z" (UID: "fa0ca8a9-96d3-40dc-916f-97048b7112b0") : secret "metallb-memberlist" not found
Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.686717 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fa0ca8a9-96d3-40dc-916f-97048b7112b0-metrics-certs\") pod \"speaker-7wr9z\" (UID: \"fa0ca8a9-96d3-40dc-916f-97048b7112b0\") " pod="metallb-system/speaker-7wr9z"
Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.687118 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3e29f306-3558-4854-9ada-3ff94d2ad700-metrics-certs\") pod \"controller-6c7b4b5f48-m58j2\" (UID: \"3e29f306-3558-4854-9ada-3ff94d2ad700\") " pod="metallb-system/controller-6c7b4b5f48-m58j2"
Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.808376 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-kn25p"
Nov 24 13:34:23 crc kubenswrapper[5039]: I1124 13:34:23.931483 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-m58j2"
Nov 24 13:34:24 crc kubenswrapper[5039]: I1124 13:34:24.291059 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-kn25p"]
Nov 24 13:34:24 crc kubenswrapper[5039]: I1124 13:34:24.373833 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-m58j2"]
Nov 24 13:34:24 crc kubenswrapper[5039]: W1124 13:34:24.376189 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3e29f306_3558_4854_9ada_3ff94d2ad700.slice/crio-417afb7e7f991b0ccefe86fca6fe9265975c309e20645c7353833f67dcec742b WatchSource:0}: Error finding container 417afb7e7f991b0ccefe86fca6fe9265975c309e20645c7353833f67dcec742b: Status 404 returned error can't find the container with id 417afb7e7f991b0ccefe86fca6fe9265975c309e20645c7353833f67dcec742b
Nov 24 13:34:24 crc kubenswrapper[5039]: I1124 13:34:24.686148 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-kn25p" event={"ID":"d341f082-ff80-43f0-aa5c-1476f8addb05","Type":"ContainerStarted","Data":"e3819cabfd4f174a60c43a4bec90b14e693741f10a6bb4a151ac51295e397bee"}
Nov 24 13:34:24 crc kubenswrapper[5039]: I1124 13:34:24.698480 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/fa0ca8a9-96d3-40dc-916f-97048b7112b0-memberlist\") pod \"speaker-7wr9z\" (UID: \"fa0ca8a9-96d3-40dc-916f-97048b7112b0\") " pod="metallb-system/speaker-7wr9z"
Nov 24 13:34:24 crc kubenswrapper[5039]: E1124 13:34:24.698784 5039 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Nov 24 13:34:24 crc kubenswrapper[5039]: E1124 13:34:24.698852 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fa0ca8a9-96d3-40dc-916f-97048b7112b0-memberlist podName:fa0ca8a9-96d3-40dc-916f-97048b7112b0 nodeName:}" failed. No retries permitted until 2025-11-24 13:34:26.698831866 +0000 UTC m=+979.137956366 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/fa0ca8a9-96d3-40dc-916f-97048b7112b0-memberlist") pod "speaker-7wr9z" (UID: "fa0ca8a9-96d3-40dc-916f-97048b7112b0") : secret "metallb-memberlist" not found
Nov 24 13:34:24 crc kubenswrapper[5039]: I1124 13:34:24.699452 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-m58j2" event={"ID":"3e29f306-3558-4854-9ada-3ff94d2ad700","Type":"ContainerStarted","Data":"2c521540bff5210c010e5d27d8caf213aeab08147f71f10afdbff2fa756a98f3"}
Nov 24 13:34:24 crc kubenswrapper[5039]: I1124 13:34:24.699493 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-m58j2" event={"ID":"3e29f306-3558-4854-9ada-3ff94d2ad700","Type":"ContainerStarted","Data":"417afb7e7f991b0ccefe86fca6fe9265975c309e20645c7353833f67dcec742b"}
Nov 24 13:34:25 crc kubenswrapper[5039]: I1124 13:34:25.725647 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-m58j2" event={"ID":"3e29f306-3558-4854-9ada-3ff94d2ad700","Type":"ContainerStarted","Data":"0a0b5cf1cf3b57295a3fc3dbddf0f021589407445e8607085f6bfcb2c49dc147"}
Nov 24 13:34:25 crc kubenswrapper[5039]: I1124 13:34:25.726472 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-m58j2"
Nov 24 13:34:25 crc kubenswrapper[5039]: I1124 13:34:25.746553 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-m58j2" podStartSLOduration=3.746532575 podStartE2EDuration="3.746532575s" podCreationTimestamp="2025-11-24 13:34:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:34:25.745005899 +0000 UTC m=+978.184130399" watchObservedRunningTime="2025-11-24 13:34:25.746532575 +0000 UTC m=+978.185657075"
Nov 24 13:34:26 crc kubenswrapper[5039]: I1124 13:34:26.728355 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/fa0ca8a9-96d3-40dc-916f-97048b7112b0-memberlist\") pod \"speaker-7wr9z\" (UID: \"fa0ca8a9-96d3-40dc-916f-97048b7112b0\") " pod="metallb-system/speaker-7wr9z"
Nov 24 13:34:26 crc kubenswrapper[5039]: I1124 13:34:26.781521 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/fa0ca8a9-96d3-40dc-916f-97048b7112b0-memberlist\") pod \"speaker-7wr9z\" (UID: \"fa0ca8a9-96d3-40dc-916f-97048b7112b0\") " pod="metallb-system/speaker-7wr9z"
Nov 24 13:34:26 crc kubenswrapper[5039]: I1124 13:34:26.900979 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-7wr9z"
Nov 24 13:34:26 crc kubenswrapper[5039]: W1124 13:34:26.931909 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfa0ca8a9_96d3_40dc_916f_97048b7112b0.slice/crio-1752d1cfcf59308ec50f5ed3c7c35086601baed6dc6b9ba50303acc2212a0c52 WatchSource:0}: Error finding container 1752d1cfcf59308ec50f5ed3c7c35086601baed6dc6b9ba50303acc2212a0c52: Status 404 returned error can't find the container with id 1752d1cfcf59308ec50f5ed3c7c35086601baed6dc6b9ba50303acc2212a0c52
Nov 24 13:34:27 crc kubenswrapper[5039]: I1124 13:34:27.741972 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-7wr9z" event={"ID":"fa0ca8a9-96d3-40dc-916f-97048b7112b0","Type":"ContainerStarted","Data":"83d255ef536868cedf7763e6309d0416d4e05f2ff7a27c8d65b556d6386b6fbf"}
Nov 24 13:34:27 crc kubenswrapper[5039]: I1124 13:34:27.742318 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-7wr9z" event={"ID":"fa0ca8a9-96d3-40dc-916f-97048b7112b0","Type":"ContainerStarted","Data":"07ec293287f6955a6de2a435a5e76bea37b685b96ed2300d027c67faf95f38a1"}
Nov 24 13:34:27 crc kubenswrapper[5039]: I1124 13:34:27.742330 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-7wr9z" event={"ID":"fa0ca8a9-96d3-40dc-916f-97048b7112b0","Type":"ContainerStarted","Data":"1752d1cfcf59308ec50f5ed3c7c35086601baed6dc6b9ba50303acc2212a0c52"}
Nov 24 13:34:27 crc kubenswrapper[5039]: I1124 13:34:27.742494 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-7wr9z"
Nov 24 13:34:27 crc kubenswrapper[5039]: I1124 13:34:27.758756 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-7wr9z" podStartSLOduration=5.758738183 podStartE2EDuration="5.758738183s" podCreationTimestamp="2025-11-24 13:34:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:34:27.756571961 +0000 UTC m=+980.195696471" watchObservedRunningTime="2025-11-24 13:34:27.758738183 +0000 UTC m=+980.197862683"
Nov 24 13:34:31 crc kubenswrapper[5039]: I1124 13:34:31.774990 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-kn25p" event={"ID":"d341f082-ff80-43f0-aa5c-1476f8addb05","Type":"ContainerStarted","Data":"9dfcb69af0fd93ca33c76544aa325374ca608fb37e34a578b570ca220851fbd2"}
Nov 24 13:34:31 crc kubenswrapper[5039]: I1124 13:34:31.776386 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-kn25p"
Nov 24 13:34:31 crc kubenswrapper[5039]: I1124 13:34:31.777958 5039 generic.go:334] "Generic (PLEG): container finished" podID="a6e7ae26-ae37-48c9-97ca-917a4e92a535" containerID="5c7673fc3c0e4f6da43b7a6af60d9e2036b98f9dc761a4a9b6e99092cb7b56c5" exitCode=0
Nov 24 13:34:31 crc kubenswrapper[5039]: I1124 13:34:31.778045 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-2zrlg" event={"ID":"a6e7ae26-ae37-48c9-97ca-917a4e92a535","Type":"ContainerDied","Data":"5c7673fc3c0e4f6da43b7a6af60d9e2036b98f9dc761a4a9b6e99092cb7b56c5"}
Nov 24 13:34:31 crc kubenswrapper[5039]: I1124 13:34:31.808688 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-kn25p" podStartSLOduration=2.839514502 podStartE2EDuration="9.808660475s" podCreationTimestamp="2025-11-24 13:34:22 +0000 UTC" firstStartedPulling="2025-11-24 13:34:24.296585818 +0000 UTC m=+976.735710328" lastFinishedPulling="2025-11-24 13:34:31.265731771 +0000 UTC m=+983.704856301" observedRunningTime="2025-11-24 13:34:31.798337537 +0000 UTC m=+984.237462037" watchObservedRunningTime="2025-11-24 13:34:31.808660475 +0000 UTC m=+984.247784995"
Nov 24 13:34:32 crc kubenswrapper[5039]: I1124 13:34:32.787743 5039 generic.go:334] "Generic (PLEG): container finished" podID="a6e7ae26-ae37-48c9-97ca-917a4e92a535" containerID="83731188700e1e9b69cf7780c96453ba1a97a2a0d47eee6cc1a134914be5eb9f" exitCode=0
Nov 24 13:34:32 crc kubenswrapper[5039]: I1124 13:34:32.788686 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-2zrlg" event={"ID":"a6e7ae26-ae37-48c9-97ca-917a4e92a535","Type":"ContainerDied","Data":"83731188700e1e9b69cf7780c96453ba1a97a2a0d47eee6cc1a134914be5eb9f"}
Nov 24 13:34:33 crc kubenswrapper[5039]: I1124 13:34:33.797760 5039 generic.go:334] "Generic (PLEG): container finished" podID="a6e7ae26-ae37-48c9-97ca-917a4e92a535" containerID="d2ce6fae231aaef9f906da315b938a89a541c54c9b47e4692b1276b693741f59" exitCode=0
Nov 24 13:34:33 crc kubenswrapper[5039]: I1124 13:34:33.797812 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-2zrlg" event={"ID":"a6e7ae26-ae37-48c9-97ca-917a4e92a535","Type":"ContainerDied","Data":"d2ce6fae231aaef9f906da315b938a89a541c54c9b47e4692b1276b693741f59"}
Nov 24 13:34:34 crc kubenswrapper[5039]: I1124 13:34:34.808381 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-2zrlg" event={"ID":"a6e7ae26-ae37-48c9-97ca-917a4e92a535","Type":"ContainerStarted","Data":"c3e0906716bf027f4c2972740a5f6cef31ff6ec56ece16d6338fd1d336888dd5"}
Nov 24 13:34:34 crc kubenswrapper[5039]: I1124 13:34:34.808621 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-2zrlg" event={"ID":"a6e7ae26-ae37-48c9-97ca-917a4e92a535","Type":"ContainerStarted","Data":"5fba00840d79e1ed42514433061bd47d9060aef00ba2936da97bb4682dae88f2"}
Nov 24 13:34:34 crc kubenswrapper[5039]: I1124 13:34:34.808634 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-2zrlg" event={"ID":"a6e7ae26-ae37-48c9-97ca-917a4e92a535","Type":"ContainerStarted","Data":"9569020f00bcf7dbe70050496e2a1e493cd3bd6b3659d679d491bcaa793fd7fa"}
Nov 24 13:34:34 crc kubenswrapper[5039]: I1124 13:34:34.808645 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-2zrlg" event={"ID":"a6e7ae26-ae37-48c9-97ca-917a4e92a535","Type":"ContainerStarted","Data":"b29e84f86519e2b7455395a2cad1169cf77746ead7f85f36bba84bbe078cf612"}
Nov 24 13:34:35 crc kubenswrapper[5039]: I1124 13:34:35.818407 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-2zrlg" event={"ID":"a6e7ae26-ae37-48c9-97ca-917a4e92a535","Type":"ContainerStarted","Data":"ba0013484009811c4f1c107e0356688f189cc6b5576d02b9af1f123c8f0cecf7"}
Nov 24 13:34:35 crc kubenswrapper[5039]: I1124 13:34:35.818459 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-2zrlg" event={"ID":"a6e7ae26-ae37-48c9-97ca-917a4e92a535","Type":"ContainerStarted","Data":"a7146e4b2b31ae9f2b32db99cfa2924e0f39dffd6144a30395895a36ebf91ec2"}
Nov 24 13:34:36 crc kubenswrapper[5039]: I1124 13:34:36.823819 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-2zrlg"
Nov 24 13:34:38 crc kubenswrapper[5039]: I1124 13:34:38.220340 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-2zrlg"
Nov 24 13:34:38 crc kubenswrapper[5039]: I1124 13:34:38.277395 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-2zrlg"
Nov 24 13:34:38 crc kubenswrapper[5039]: I1124 13:34:38.297700 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-2zrlg" podStartSLOduration=8.48186602 podStartE2EDuration="16.297684091s" podCreationTimestamp="2025-11-24 13:34:22 +0000 UTC" firstStartedPulling="2025-11-24 13:34:23.401110213 +0000 UTC m=+975.840234733" lastFinishedPulling="2025-11-24 13:34:31.216928304 +0000 UTC m=+983.656052804" observedRunningTime="2025-11-24 13:34:35.843080704 +0000 UTC m=+988.282205224" watchObservedRunningTime="2025-11-24 13:34:38.297684091 +0000 UTC m=+990.736808591"
Nov 24 13:34:43 crc kubenswrapper[5039]: I1124 13:34:43.226736 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-2zrlg"
Nov 24 13:34:43 crc kubenswrapper[5039]: I1124 13:34:43.815649 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-kn25p"
Nov 24 13:34:43 crc kubenswrapper[5039]: I1124 13:34:43.947985 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-m58j2"
Nov 24 13:34:46 crc kubenswrapper[5039]: I1124 13:34:46.905753 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-7wr9z"
Nov 24 13:34:50 crc kubenswrapper[5039]: I1124 13:34:50.271178 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-z2877"]
Nov 24 13:34:50 crc kubenswrapper[5039]: I1124 13:34:50.273016 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-z2877"
Nov 24 13:34:50 crc kubenswrapper[5039]: I1124 13:34:50.275146 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Nov 24 13:34:50 crc kubenswrapper[5039]: I1124 13:34:50.277250 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-m4lkc"
Nov 24 13:34:50 crc kubenswrapper[5039]: I1124 13:34:50.280065 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Nov 24 13:34:50 crc kubenswrapper[5039]: I1124 13:34:50.281623 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-z2877"]
Nov 24 13:34:50 crc kubenswrapper[5039]: I1124 13:34:50.379105 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8z9dc\" (UniqueName: \"kubernetes.io/projected/3d9bec87-1da8-4761-bd29-e6f11182d041-kube-api-access-8z9dc\") pod \"openstack-operator-index-z2877\" (UID: \"3d9bec87-1da8-4761-bd29-e6f11182d041\") " pod="openstack-operators/openstack-operator-index-z2877"
Nov 24 13:34:50 crc kubenswrapper[5039]: I1124 13:34:50.480438 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8z9dc\" (UniqueName: \"kubernetes.io/projected/3d9bec87-1da8-4761-bd29-e6f11182d041-kube-api-access-8z9dc\") pod \"openstack-operator-index-z2877\" (UID: \"3d9bec87-1da8-4761-bd29-e6f11182d041\") " pod="openstack-operators/openstack-operator-index-z2877"
Nov 24 13:34:50 crc kubenswrapper[5039]: I1124 13:34:50.504553 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8z9dc\" (UniqueName: \"kubernetes.io/projected/3d9bec87-1da8-4761-bd29-e6f11182d041-kube-api-access-8z9dc\") pod \"openstack-operator-index-z2877\" (UID: \"3d9bec87-1da8-4761-bd29-e6f11182d041\") " pod="openstack-operators/openstack-operator-index-z2877"
Nov 24 13:34:50 crc kubenswrapper[5039]: I1124 13:34:50.614735 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-z2877"
Nov 24 13:34:51 crc kubenswrapper[5039]: I1124 13:34:51.008171 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-z2877"]
Nov 24 13:34:51 crc kubenswrapper[5039]: W1124 13:34:51.012552 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3d9bec87_1da8_4761_bd29_e6f11182d041.slice/crio-ca273a0cbff0c69fa6fa03d24230adb311d54eed81fcd7d32a769db8dd22ff4a WatchSource:0}: Error finding container ca273a0cbff0c69fa6fa03d24230adb311d54eed81fcd7d32a769db8dd22ff4a: Status 404 returned error can't find the container with id ca273a0cbff0c69fa6fa03d24230adb311d54eed81fcd7d32a769db8dd22ff4a
Nov 24 13:34:51 crc kubenswrapper[5039]: I1124 13:34:51.931207 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-z2877" event={"ID":"3d9bec87-1da8-4761-bd29-e6f11182d041","Type":"ContainerStarted","Data":"ca273a0cbff0c69fa6fa03d24230adb311d54eed81fcd7d32a769db8dd22ff4a"}
Nov 24 13:34:53 crc kubenswrapper[5039]: I1124 13:34:53.646403 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-z2877"]
Nov 24 13:34:54 crc kubenswrapper[5039]: I1124 13:34:54.250767 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-lg47r"]
Nov 24 13:34:54 crc kubenswrapper[5039]: I1124 13:34:54.252078 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-lg47r"
Nov 24 13:34:54 crc kubenswrapper[5039]: I1124 13:34:54.278735 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-lg47r"]
Nov 24 13:34:54 crc kubenswrapper[5039]: I1124 13:34:54.335536 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l24jc\" (UniqueName: \"kubernetes.io/projected/30a0cc80-e6f2-48b0-9469-32d5d397c0aa-kube-api-access-l24jc\") pod \"openstack-operator-index-lg47r\" (UID: \"30a0cc80-e6f2-48b0-9469-32d5d397c0aa\") " pod="openstack-operators/openstack-operator-index-lg47r"
Nov 24 13:34:54 crc kubenswrapper[5039]: I1124 13:34:54.436948 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l24jc\" (UniqueName: \"kubernetes.io/projected/30a0cc80-e6f2-48b0-9469-32d5d397c0aa-kube-api-access-l24jc\") pod \"openstack-operator-index-lg47r\" (UID: \"30a0cc80-e6f2-48b0-9469-32d5d397c0aa\") " pod="openstack-operators/openstack-operator-index-lg47r"
Nov 24 13:34:54 crc kubenswrapper[5039]: I1124 13:34:54.457108 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l24jc\" (UniqueName: \"kubernetes.io/projected/30a0cc80-e6f2-48b0-9469-32d5d397c0aa-kube-api-access-l24jc\") pod \"openstack-operator-index-lg47r\" (UID: \"30a0cc80-e6f2-48b0-9469-32d5d397c0aa\") " pod="openstack-operators/openstack-operator-index-lg47r"
Nov 24 13:34:54 crc kubenswrapper[5039]: I1124 13:34:54.601008 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-lg47r"
Nov 24 13:34:56 crc kubenswrapper[5039]: I1124 13:34:56.025234 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-lg47r"]
Nov 24 13:34:56 crc kubenswrapper[5039]: I1124 13:34:56.967148 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-lg47r" event={"ID":"30a0cc80-e6f2-48b0-9469-32d5d397c0aa","Type":"ContainerStarted","Data":"64e37c9937cf8c77bc5643c4346e787feadf90df5ad91922f8a2ba5313d75164"}
Nov 24 13:34:57 crc kubenswrapper[5039]: I1124 13:34:57.975682 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-z2877" event={"ID":"3d9bec87-1da8-4761-bd29-e6f11182d041","Type":"ContainerStarted","Data":"10e48108164689419b0888e0e41779a31afba0d4bee0c58f7236f9f9f6a9bf0a"}
Nov 24 13:34:57 crc kubenswrapper[5039]: I1124 13:34:57.975759 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-z2877" podUID="3d9bec87-1da8-4761-bd29-e6f11182d041" containerName="registry-server" containerID="cri-o://10e48108164689419b0888e0e41779a31afba0d4bee0c58f7236f9f9f6a9bf0a" gracePeriod=2
Nov 24 13:34:57 crc kubenswrapper[5039]: I1124 13:34:57.976993 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-lg47r" event={"ID":"30a0cc80-e6f2-48b0-9469-32d5d397c0aa","Type":"ContainerStarted","Data":"1cfbcb2a74685aff5a1d9426ab486ca1d80eee3da6b6f6db6314d4c626154c64"}
Nov 24 13:34:57 crc kubenswrapper[5039]: I1124 13:34:57.997293 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-z2877" podStartSLOduration=1.5564756659999999 podStartE2EDuration="7.997277598s" podCreationTimestamp="2025-11-24 13:34:50 +0000 UTC" firstStartedPulling="2025-11-24 13:34:51.017481966 +0000 UTC m=+1003.456606456" lastFinishedPulling="2025-11-24 13:34:57.458283838 +0000 UTC m=+1009.897408388" observedRunningTime="2025-11-24 13:34:57.995812932 +0000 UTC m=+1010.434937422" watchObservedRunningTime="2025-11-24 13:34:57.997277598 +0000 UTC m=+1010.436402098"
Nov 24 13:34:58 crc kubenswrapper[5039]: I1124 13:34:58.011548 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-lg47r" podStartSLOduration=2.847089366 podStartE2EDuration="4.011527602s" podCreationTimestamp="2025-11-24 13:34:54 +0000 UTC" firstStartedPulling="2025-11-24 13:34:56.294828175 +0000 UTC m=+1008.733952695" lastFinishedPulling="2025-11-24 13:34:57.459266391 +0000 UTC m=+1009.898390931" observedRunningTime="2025-11-24 13:34:58.01065271 +0000 UTC m=+1010.449777210" watchObservedRunningTime="2025-11-24 13:34:58.011527602 +0000 UTC m=+1010.450652102"
Nov 24 13:34:58 crc kubenswrapper[5039]: I1124 13:34:58.397973 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-z2877"
Nov 24 13:34:58 crc kubenswrapper[5039]: I1124 13:34:58.502410 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8z9dc\" (UniqueName: \"kubernetes.io/projected/3d9bec87-1da8-4761-bd29-e6f11182d041-kube-api-access-8z9dc\") pod \"3d9bec87-1da8-4761-bd29-e6f11182d041\" (UID: \"3d9bec87-1da8-4761-bd29-e6f11182d041\") "
Nov 24 13:34:58 crc kubenswrapper[5039]: I1124 13:34:58.509277 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d9bec87-1da8-4761-bd29-e6f11182d041-kube-api-access-8z9dc" (OuterVolumeSpecName: "kube-api-access-8z9dc") pod "3d9bec87-1da8-4761-bd29-e6f11182d041" (UID: "3d9bec87-1da8-4761-bd29-e6f11182d041"). InnerVolumeSpecName "kube-api-access-8z9dc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:34:58 crc kubenswrapper[5039]: I1124 13:34:58.605294 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8z9dc\" (UniqueName: \"kubernetes.io/projected/3d9bec87-1da8-4761-bd29-e6f11182d041-kube-api-access-8z9dc\") on node \"crc\" DevicePath \"\""
Nov 24 13:34:58 crc kubenswrapper[5039]: I1124 13:34:58.987310 5039 generic.go:334] "Generic (PLEG): container finished" podID="3d9bec87-1da8-4761-bd29-e6f11182d041" containerID="10e48108164689419b0888e0e41779a31afba0d4bee0c58f7236f9f9f6a9bf0a" exitCode=0
Nov 24 13:34:58 crc kubenswrapper[5039]: I1124 13:34:58.987367 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-z2877" event={"ID":"3d9bec87-1da8-4761-bd29-e6f11182d041","Type":"ContainerDied","Data":"10e48108164689419b0888e0e41779a31afba0d4bee0c58f7236f9f9f6a9bf0a"}
Nov 24 13:34:58 crc kubenswrapper[5039]: I1124 13:34:58.987418 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-z2877" event={"ID":"3d9bec87-1da8-4761-bd29-e6f11182d041","Type":"ContainerDied","Data":"ca273a0cbff0c69fa6fa03d24230adb311d54eed81fcd7d32a769db8dd22ff4a"}
Nov 24 13:34:58 crc kubenswrapper[5039]: I1124 13:34:58.987417 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-z2877"
Nov 24 13:34:58 crc kubenswrapper[5039]: I1124 13:34:58.987441 5039 scope.go:117] "RemoveContainer" containerID="10e48108164689419b0888e0e41779a31afba0d4bee0c58f7236f9f9f6a9bf0a"
Nov 24 13:34:59 crc kubenswrapper[5039]: I1124 13:34:59.015783 5039 scope.go:117] "RemoveContainer" containerID="10e48108164689419b0888e0e41779a31afba0d4bee0c58f7236f9f9f6a9bf0a"
Nov 24 13:34:59 crc kubenswrapper[5039]: E1124 13:34:59.016126 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10e48108164689419b0888e0e41779a31afba0d4bee0c58f7236f9f9f6a9bf0a\": container with ID starting with 10e48108164689419b0888e0e41779a31afba0d4bee0c58f7236f9f9f6a9bf0a not found: ID does not exist" containerID="10e48108164689419b0888e0e41779a31afba0d4bee0c58f7236f9f9f6a9bf0a"
Nov 24 13:34:59 crc kubenswrapper[5039]: I1124 13:34:59.016164 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10e48108164689419b0888e0e41779a31afba0d4bee0c58f7236f9f9f6a9bf0a"} err="failed to get container status \"10e48108164689419b0888e0e41779a31afba0d4bee0c58f7236f9f9f6a9bf0a\": rpc error: code = NotFound desc = could not find container \"10e48108164689419b0888e0e41779a31afba0d4bee0c58f7236f9f9f6a9bf0a\": container with ID starting with 10e48108164689419b0888e0e41779a31afba0d4bee0c58f7236f9f9f6a9bf0a not found: ID does not exist"
Nov 24 13:34:59 crc kubenswrapper[5039]: I1124 13:34:59.044381 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-z2877"]
Nov 24 13:34:59 crc kubenswrapper[5039]: I1124 13:34:59.049099 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-z2877"]
Nov 24 13:35:00 crc kubenswrapper[5039]: I1124 13:35:00.318957 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d9bec87-1da8-4761-bd29-e6f11182d041" path="/var/lib/kubelet/pods/3d9bec87-1da8-4761-bd29-e6f11182d041/volumes"
Nov 24 13:35:04 crc kubenswrapper[5039]: I1124 13:35:04.601091 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-lg47r"
Nov 24 13:35:04 crc kubenswrapper[5039]: I1124 13:35:04.601671 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-lg47r"
Nov 24 13:35:04 crc kubenswrapper[5039]: I1124 13:35:04.626577 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-lg47r"
Nov 24 13:35:05 crc kubenswrapper[5039]: I1124 13:35:05.062123 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-lg47r"
Nov 24 13:35:06 crc kubenswrapper[5039]: I1124 13:35:06.496312 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b"]
Nov 24 13:35:06 crc kubenswrapper[5039]: E1124 13:35:06.496603 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d9bec87-1da8-4761-bd29-e6f11182d041" containerName="registry-server"
Nov 24 13:35:06 crc kubenswrapper[5039]: I1124 13:35:06.496615 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d9bec87-1da8-4761-bd29-e6f11182d041" containerName="registry-server"
Nov 24 13:35:06 crc kubenswrapper[5039]: I1124 13:35:06.496743 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d9bec87-1da8-4761-bd29-e6f11182d041" containerName="registry-server"
Nov 24 13:35:06 crc kubenswrapper[5039]: I1124 13:35:06.497762 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b"
Nov 24 13:35:06 crc kubenswrapper[5039]: I1124 13:35:06.502917 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-m4w89"
Nov 24 13:35:06 crc kubenswrapper[5039]: I1124 13:35:06.524843 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b"]
Nov 24 13:35:06 crc kubenswrapper[5039]: I1124 13:35:06.622830 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zhzl\" (UniqueName: \"kubernetes.io/projected/0dc29372-3c0b-496e-b027-e57abc3ca956-kube-api-access-5zhzl\") pod \"e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b\" (UID: \"0dc29372-3c0b-496e-b027-e57abc3ca956\") " pod="openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b"
Nov 24 13:35:06 crc kubenswrapper[5039]: I1124 13:35:06.623095 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0dc29372-3c0b-496e-b027-e57abc3ca956-util\") pod \"e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b\" (UID: \"0dc29372-3c0b-496e-b027-e57abc3ca956\") " pod="openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b"
Nov 24 13:35:06 crc kubenswrapper[5039]: I1124 13:35:06.623191 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0dc29372-3c0b-496e-b027-e57abc3ca956-bundle\") pod \"e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b\" (UID: \"0dc29372-3c0b-496e-b027-e57abc3ca956\") " pod="openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b"
Nov 24 13:35:06 crc kubenswrapper[5039]: I1124 13:35:06.724360 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zhzl\" (UniqueName: \"kubernetes.io/projected/0dc29372-3c0b-496e-b027-e57abc3ca956-kube-api-access-5zhzl\") pod \"e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b\" (UID: \"0dc29372-3c0b-496e-b027-e57abc3ca956\") " pod="openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b"
Nov 24 13:35:06 crc kubenswrapper[5039]: I1124 13:35:06.724684 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0dc29372-3c0b-496e-b027-e57abc3ca956-util\") pod \"e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b\" (UID: \"0dc29372-3c0b-496e-b027-e57abc3ca956\") " pod="openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b"
Nov 24 13:35:06 crc kubenswrapper[5039]: I1124 13:35:06.724810 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0dc29372-3c0b-496e-b027-e57abc3ca956-bundle\") pod \"e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b\" (UID: \"0dc29372-3c0b-496e-b027-e57abc3ca956\") " pod="openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b"
Nov 24 13:35:06 crc kubenswrapper[5039]: I1124 13:35:06.725247 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0dc29372-3c0b-496e-b027-e57abc3ca956-util\") pod \"e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b\" (UID: \"0dc29372-3c0b-496e-b027-e57abc3ca956\") " pod="openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b"
Nov 24 13:35:06 crc kubenswrapper[5039]: I1124 13:35:06.725279 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0dc29372-3c0b-496e-b027-e57abc3ca956-bundle\") pod \"e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b\" (UID: \"0dc29372-3c0b-496e-b027-e57abc3ca956\") " pod="openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b"
Nov 24 13:35:06 crc kubenswrapper[5039]: I1124 13:35:06.743450 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zhzl\" (UniqueName: \"kubernetes.io/projected/0dc29372-3c0b-496e-b027-e57abc3ca956-kube-api-access-5zhzl\") pod \"e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b\" (UID: \"0dc29372-3c0b-496e-b027-e57abc3ca956\") " pod="openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b"
Nov 24 13:35:06 crc kubenswrapper[5039]: I1124 13:35:06.824320 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b"
Nov 24 13:35:07 crc kubenswrapper[5039]: I1124 13:35:07.243433 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b"]
Nov 24 13:35:07 crc kubenswrapper[5039]: W1124 13:35:07.249761 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0dc29372_3c0b_496e_b027_e57abc3ca956.slice/crio-16d9ae9052aa4e4910cdc47248ddcb8d43d483dadf81211aeab5bbdea011beb2 WatchSource:0}: Error finding container 16d9ae9052aa4e4910cdc47248ddcb8d43d483dadf81211aeab5bbdea011beb2: Status 404 returned error can't find the container with id 16d9ae9052aa4e4910cdc47248ddcb8d43d483dadf81211aeab5bbdea011beb2
Nov 24 13:35:08 crc kubenswrapper[5039]: I1124 13:35:08.063490 5039 generic.go:334] "Generic (PLEG): container finished" podID="0dc29372-3c0b-496e-b027-e57abc3ca956" containerID="95bb2e96b00aecdac356c0bf6b0920d9570d33787c4766c87a332e24cc20cb9f" exitCode=0
Nov 24 13:35:08 crc kubenswrapper[5039]: I1124 13:35:08.063556 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b" event={"ID":"0dc29372-3c0b-496e-b027-e57abc3ca956","Type":"ContainerDied","Data":"95bb2e96b00aecdac356c0bf6b0920d9570d33787c4766c87a332e24cc20cb9f"}
Nov 24 13:35:08 crc kubenswrapper[5039]: I1124 13:35:08.063582 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b" event={"ID":"0dc29372-3c0b-496e-b027-e57abc3ca956","Type":"ContainerStarted","Data":"16d9ae9052aa4e4910cdc47248ddcb8d43d483dadf81211aeab5bbdea011beb2"}
Nov 24 13:35:10 crc kubenswrapper[5039]: I1124 13:35:10.085112 5039 generic.go:334] "Generic (PLEG): container finished" podID="0dc29372-3c0b-496e-b027-e57abc3ca956" containerID="9c9122daa1f0825de96f28fe4284316bb93251dd1fb196df772dcc3dc6be3097" exitCode=0
Nov 24 13:35:10 crc kubenswrapper[5039]: I1124 13:35:10.085606 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b" event={"ID":"0dc29372-3c0b-496e-b027-e57abc3ca956","Type":"ContainerDied","Data":"9c9122daa1f0825de96f28fe4284316bb93251dd1fb196df772dcc3dc6be3097"}
Nov 24 13:35:11 crc kubenswrapper[5039]: I1124 13:35:11.096364 5039 generic.go:334] "Generic (PLEG): container finished" podID="0dc29372-3c0b-496e-b027-e57abc3ca956" containerID="a43eafbf8372903fa2876894fd803a76af2d56c64c78f3a026081c96ccce3082" exitCode=0
Nov 24 13:35:11 crc kubenswrapper[5039]: I1124 13:35:11.096420 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b" event={"ID":"0dc29372-3c0b-496e-b027-e57abc3ca956","Type":"ContainerDied","Data":"a43eafbf8372903fa2876894fd803a76af2d56c64c78f3a026081c96ccce3082"}
Nov 24 13:35:12 crc kubenswrapper[5039]: I1124 13:35:12.461348 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b"
Nov 24 13:35:12 crc kubenswrapper[5039]: I1124 13:35:12.507469 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0dc29372-3c0b-496e-b027-e57abc3ca956-util\") pod \"0dc29372-3c0b-496e-b027-e57abc3ca956\" (UID: \"0dc29372-3c0b-496e-b027-e57abc3ca956\") "
Nov 24 13:35:12 crc kubenswrapper[5039]: I1124 13:35:12.507545 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0dc29372-3c0b-496e-b027-e57abc3ca956-bundle\") pod \"0dc29372-3c0b-496e-b027-e57abc3ca956\" (UID: \"0dc29372-3c0b-496e-b027-e57abc3ca956\") "
Nov 24 13:35:12 crc kubenswrapper[5039]: I1124 13:35:12.507624 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5zhzl\" (UniqueName: \"kubernetes.io/projected/0dc29372-3c0b-496e-b027-e57abc3ca956-kube-api-access-5zhzl\") pod \"0dc29372-3c0b-496e-b027-e57abc3ca956\" (UID: \"0dc29372-3c0b-496e-b027-e57abc3ca956\") "
Nov 24 13:35:12 crc kubenswrapper[5039]: I1124 13:35:12.508155 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0dc29372-3c0b-496e-b027-e57abc3ca956-bundle" (OuterVolumeSpecName: "bundle") pod "0dc29372-3c0b-496e-b027-e57abc3ca956" (UID: "0dc29372-3c0b-496e-b027-e57abc3ca956"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:35:12 crc kubenswrapper[5039]: I1124 13:35:12.514029 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0dc29372-3c0b-496e-b027-e57abc3ca956-kube-api-access-5zhzl" (OuterVolumeSpecName: "kube-api-access-5zhzl") pod "0dc29372-3c0b-496e-b027-e57abc3ca956" (UID: "0dc29372-3c0b-496e-b027-e57abc3ca956"). InnerVolumeSpecName "kube-api-access-5zhzl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:35:12 crc kubenswrapper[5039]: I1124 13:35:12.524819 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0dc29372-3c0b-496e-b027-e57abc3ca956-util" (OuterVolumeSpecName: "util") pod "0dc29372-3c0b-496e-b027-e57abc3ca956" (UID: "0dc29372-3c0b-496e-b027-e57abc3ca956"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:35:12 crc kubenswrapper[5039]: I1124 13:35:12.609867 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5zhzl\" (UniqueName: \"kubernetes.io/projected/0dc29372-3c0b-496e-b027-e57abc3ca956-kube-api-access-5zhzl\") on node \"crc\" DevicePath \"\""
Nov 24 13:35:12 crc kubenswrapper[5039]: I1124 13:35:12.609936 5039 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0dc29372-3c0b-496e-b027-e57abc3ca956-util\") on node \"crc\" DevicePath \"\""
Nov 24 13:35:12 crc kubenswrapper[5039]: I1124 13:35:12.609956 5039 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0dc29372-3c0b-496e-b027-e57abc3ca956-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 13:35:13 crc kubenswrapper[5039]: I1124 13:35:13.119925 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b" event={"ID":"0dc29372-3c0b-496e-b027-e57abc3ca956","Type":"ContainerDied","Data":"16d9ae9052aa4e4910cdc47248ddcb8d43d483dadf81211aeab5bbdea011beb2"}
Nov 24 13:35:13 crc kubenswrapper[5039]: I1124 13:35:13.120013 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="16d9ae9052aa4e4910cdc47248ddcb8d43d483dadf81211aeab5bbdea011beb2"
Nov 24 13:35:13 crc kubenswrapper[5039]: I1124 13:35:13.120194 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b"
Nov 24 13:35:18 crc kubenswrapper[5039]: I1124 13:35:18.451994 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-6ccdcd8b77-9k4cr"]
Nov 24 13:35:18 crc kubenswrapper[5039]: E1124 13:35:18.464204 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dc29372-3c0b-496e-b027-e57abc3ca956" containerName="util"
Nov 24 13:35:18 crc kubenswrapper[5039]: I1124 13:35:18.464270 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dc29372-3c0b-496e-b027-e57abc3ca956" containerName="util"
Nov 24 13:35:18 crc kubenswrapper[5039]: E1124 13:35:18.464326 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dc29372-3c0b-496e-b027-e57abc3ca956" containerName="pull"
Nov 24 13:35:18 crc kubenswrapper[5039]: I1124 13:35:18.464339 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dc29372-3c0b-496e-b027-e57abc3ca956" containerName="pull"
Nov 24 13:35:18 crc kubenswrapper[5039]: E1124 13:35:18.464384 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dc29372-3c0b-496e-b027-e57abc3ca956" containerName="extract"
Nov 24 13:35:18 crc kubenswrapper[5039]: I1124 13:35:18.464402 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dc29372-3c0b-496e-b027-e57abc3ca956" containerName="extract"
Nov 24 13:35:18 crc kubenswrapper[5039]: I1124 13:35:18.464999 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0dc29372-3c0b-496e-b027-e57abc3ca956" containerName="extract"
Nov 24 13:35:18 crc kubenswrapper[5039]: I1124 13:35:18.466293 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-6ccdcd8b77-9k4cr"
Nov 24 13:35:18 crc kubenswrapper[5039]: I1124 13:35:18.471331 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-krx4l"
Nov 24 13:35:18 crc kubenswrapper[5039]: I1124 13:35:18.497293 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-6ccdcd8b77-9k4cr"]
Nov 24 13:35:18 crc kubenswrapper[5039]: I1124 13:35:18.515790 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vsjn\" (UniqueName: \"kubernetes.io/projected/74fee1bc-d496-4e8d-9884-ce1b67a00e75-kube-api-access-5vsjn\") pod \"openstack-operator-controller-operator-6ccdcd8b77-9k4cr\" (UID: \"74fee1bc-d496-4e8d-9884-ce1b67a00e75\") " pod="openstack-operators/openstack-operator-controller-operator-6ccdcd8b77-9k4cr"
Nov 24 13:35:18 crc kubenswrapper[5039]: I1124 13:35:18.622461 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vsjn\" (UniqueName: \"kubernetes.io/projected/74fee1bc-d496-4e8d-9884-ce1b67a00e75-kube-api-access-5vsjn\") pod \"openstack-operator-controller-operator-6ccdcd8b77-9k4cr\" (UID: \"74fee1bc-d496-4e8d-9884-ce1b67a00e75\") " pod="openstack-operators/openstack-operator-controller-operator-6ccdcd8b77-9k4cr"
Nov 24 13:35:18 crc kubenswrapper[5039]: I1124 13:35:18.640840 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vsjn\" (UniqueName: \"kubernetes.io/projected/74fee1bc-d496-4e8d-9884-ce1b67a00e75-kube-api-access-5vsjn\") pod \"openstack-operator-controller-operator-6ccdcd8b77-9k4cr\" (UID: \"74fee1bc-d496-4e8d-9884-ce1b67a00e75\") " pod="openstack-operators/openstack-operator-controller-operator-6ccdcd8b77-9k4cr"
Nov 24 13:35:18 crc kubenswrapper[5039]: I1124 13:35:18.792194 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-6ccdcd8b77-9k4cr"
Nov 24 13:35:19 crc kubenswrapper[5039]: I1124 13:35:19.429156 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-6ccdcd8b77-9k4cr"]
Nov 24 13:35:20 crc kubenswrapper[5039]: I1124 13:35:20.181387 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-6ccdcd8b77-9k4cr" event={"ID":"74fee1bc-d496-4e8d-9884-ce1b67a00e75","Type":"ContainerStarted","Data":"f1013d70b3297f6f352e8af47f0699ff5e4233021ffab62bdf792242104315fc"}
Nov 24 13:35:24 crc kubenswrapper[5039]: I1124 13:35:24.213600 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-6ccdcd8b77-9k4cr" event={"ID":"74fee1bc-d496-4e8d-9884-ce1b67a00e75","Type":"ContainerStarted","Data":"8c3c35200e85b92ba0bbd13d3cee03a667ca370ff11f40852634b531d49ab2f0"}
Nov 24 13:35:24 crc kubenswrapper[5039]: I1124 13:35:24.214942 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-6ccdcd8b77-9k4cr"
Nov 24 13:35:28 crc kubenswrapper[5039]: I1124 13:35:28.794263 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-6ccdcd8b77-9k4cr"
Nov 24 13:35:28 crc kubenswrapper[5039]: I1124 13:35:28.826078 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-6ccdcd8b77-9k4cr" podStartSLOduration=6.489646291 podStartE2EDuration="10.826058138s" podCreationTimestamp="2025-11-24 13:35:18 +0000 UTC" firstStartedPulling="2025-11-24 13:35:19.450936834 +0000 UTC m=+1031.890061334" lastFinishedPulling="2025-11-24 13:35:23.787348681 +0000 UTC m=+1036.226473181" observedRunningTime="2025-11-24 13:35:24.240881158 +0000 UTC m=+1036.680005668" watchObservedRunningTime="2025-11-24 13:35:28.826058138 +0000 UTC m=+1041.265182638"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.700616 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-sgqwz"]
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.702566 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-sgqwz"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.704364 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-xh92v"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.715786 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-whklh"]
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.717095 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-whklh"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.720101 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-8cqlq"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.723756 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-6nwfx"]
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.724916 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-6nwfx"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.728426 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-57fd6"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.732798 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-sgqwz"]
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.744361 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-6nwfx"]
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.769306 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-whklh"]
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.774060 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-l8cvk"]
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.775123 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-l8cvk"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.779791 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-wzzjl"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.785433 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-bgn7g"]
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.791787 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bgn7g"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.804004 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-ngrvv"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.804220 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-l8cvk"]
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.804239 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5lp8\" (UniqueName: \"kubernetes.io/projected/396e7965-a743-4028-989b-e3610abb5a3a-kube-api-access-r5lp8\") pod \"cinder-operator-controller-manager-79856dc55c-whklh\" (UID: \"396e7965-a743-4028-989b-e3610abb5a3a\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-whklh"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.804279 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2z9sz\" (UniqueName: \"kubernetes.io/projected/b913096c-9ece-4755-9545-0116fbc53123-kube-api-access-2z9sz\") pod \"barbican-operator-controller-manager-86dc4d89c8-sgqwz\" (UID: \"b913096c-9ece-4755-9545-0116fbc53123\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-sgqwz"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.804301 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7l59l\" (UniqueName: \"kubernetes.io/projected/99f1711f-1dd9-471d-9a2d-8c6e0a46fb0d-kube-api-access-7l59l\") pod \"designate-operator-controller-manager-7d695c9b56-6nwfx\" (UID: \"99f1711f-1dd9-471d-9a2d-8c6e0a46fb0d\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-6nwfx"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.822173 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-69pxs"]
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.823585 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-69pxs"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.831851 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-x4qh7"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.843729 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-bgn7g"]
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.865879 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-69pxs"]
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.876091 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-68gjk"]
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.877308 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-68gjk"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.880491 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-6mwqx"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.909666 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5lp8\" (UniqueName: \"kubernetes.io/projected/396e7965-a743-4028-989b-e3610abb5a3a-kube-api-access-r5lp8\") pod \"cinder-operator-controller-manager-79856dc55c-whklh\" (UID: \"396e7965-a743-4028-989b-e3610abb5a3a\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-whklh"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.909749 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2z9sz\" (UniqueName: \"kubernetes.io/projected/b913096c-9ece-4755-9545-0116fbc53123-kube-api-access-2z9sz\") pod \"barbican-operator-controller-manager-86dc4d89c8-sgqwz\" (UID: \"b913096c-9ece-4755-9545-0116fbc53123\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-sgqwz"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.909777 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7l59l\" (UniqueName: \"kubernetes.io/projected/99f1711f-1dd9-471d-9a2d-8c6e0a46fb0d-kube-api-access-7l59l\") pod \"designate-operator-controller-manager-7d695c9b56-6nwfx\" (UID: \"99f1711f-1dd9-471d-9a2d-8c6e0a46fb0d\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-6nwfx"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.909909 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6kh2s\" (UniqueName: \"kubernetes.io/projected/eee87172-9357-412c-8eb2-7df01649f1d0-kube-api-access-6kh2s\") pod \"horizon-operator-controller-manager-68c9694994-69pxs\" (UID: \"eee87172-9357-412c-8eb2-7df01649f1d0\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-69pxs"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.909974 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bn22l\" (UniqueName: \"kubernetes.io/projected/0067c9ac-5dfc-4e0d-b316-161e02698ffd-kube-api-access-bn22l\") pod \"glance-operator-controller-manager-68b95954c9-l8cvk\" (UID: \"0067c9ac-5dfc-4e0d-b316-161e02698ffd\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-l8cvk"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.910071 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vl87j\" (UniqueName: \"kubernetes.io/projected/bea08559-78a5-4287-85e8-a83768d94670-kube-api-access-vl87j\") pod \"heat-operator-controller-manager-774b86978c-bgn7g\" (UID: \"bea08559-78a5-4287-85e8-a83768d94670\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-bgn7g"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.917586 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp"]
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.938299 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.948130 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-7rwb6"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.948333 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.971112 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7l59l\" (UniqueName: \"kubernetes.io/projected/99f1711f-1dd9-471d-9a2d-8c6e0a46fb0d-kube-api-access-7l59l\") pod \"designate-operator-controller-manager-7d695c9b56-6nwfx\" (UID: \"99f1711f-1dd9-471d-9a2d-8c6e0a46fb0d\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-6nwfx"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.973566 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-2lmlh"]
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.975211 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2lmlh"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.980933 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-jfqcz"
Nov 24 13:35:43 crc kubenswrapper[5039]: I1124 13:35:43.984623 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5lp8\" (UniqueName: \"kubernetes.io/projected/396e7965-a743-4028-989b-e3610abb5a3a-kube-api-access-r5lp8\") pod \"cinder-operator-controller-manager-79856dc55c-whklh\" (UID: \"396e7965-a743-4028-989b-e3610abb5a3a\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-whklh"
Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.006249 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2z9sz\" (UniqueName: \"kubernetes.io/projected/b913096c-9ece-4755-9545-0116fbc53123-kube-api-access-2z9sz\") pod \"barbican-operator-controller-manager-86dc4d89c8-sgqwz\" (UID: \"b913096c-9ece-4755-9545-0116fbc53123\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-sgqwz"
Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.012389 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bd656299-f7da-4ca8-aee9-25c389243cc9-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-j7gmp\" (UID: \"bd656299-f7da-4ca8-aee9-25c389243cc9\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp"
Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.012494 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6kh2s\" (UniqueName: \"kubernetes.io/projected/eee87172-9357-412c-8eb2-7df01649f1d0-kube-api-access-6kh2s\") pod \"horizon-operator-controller-manager-68c9694994-69pxs\" (UID: \"eee87172-9357-412c-8eb2-7df01649f1d0\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-69pxs"
Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.012547 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bn22l\" (UniqueName: \"kubernetes.io/projected/0067c9ac-5dfc-4e0d-b316-161e02698ffd-kube-api-access-bn22l\") pod \"glance-operator-controller-manager-68b95954c9-l8cvk\" (UID: \"0067c9ac-5dfc-4e0d-b316-161e02698ffd\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-l8cvk"
Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.012575 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpzrs\" (UniqueName: \"kubernetes.io/projected/bd656299-f7da-4ca8-aee9-25c389243cc9-kube-api-access-mpzrs\") pod \"infra-operator-controller-manager-d5cc86f4b-j7gmp\" (UID: \"bd656299-f7da-4ca8-aee9-25c389243cc9\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp"
Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.012606 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4s28\" (UniqueName: \"kubernetes.io/projected/35f14195-18aa-433d-8705-1aa24a8a1818-kube-api-access-z4s28\") pod \"ironic-operator-controller-manager-5bfcdc958c-68gjk\" (UID: \"35f14195-18aa-433d-8705-1aa24a8a1818\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-68gjk"
Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.012656 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vl87j\" (UniqueName: \"kubernetes.io/projected/bea08559-78a5-4287-85e8-a83768d94670-kube-api-access-vl87j\") pod \"heat-operator-controller-manager-774b86978c-bgn7g\" (UID: \"bea08559-78a5-4287-85e8-a83768d94670\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-bgn7g"
Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.023119 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-68gjk"]
Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.030847 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-sgqwz"
Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.039515 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-nng76"]
Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.041250 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-nng76"
Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.051562 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-k48vb"
Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.055249 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp"]
Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.055812 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-whklh" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.065045 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-2lmlh"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.071288 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-nng76"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.071845 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-6nwfx" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.074081 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kh2s\" (UniqueName: \"kubernetes.io/projected/eee87172-9357-412c-8eb2-7df01649f1d0-kube-api-access-6kh2s\") pod \"horizon-operator-controller-manager-68c9694994-69pxs\" (UID: \"eee87172-9357-412c-8eb2-7df01649f1d0\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-69pxs" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.077135 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bn22l\" (UniqueName: \"kubernetes.io/projected/0067c9ac-5dfc-4e0d-b316-161e02698ffd-kube-api-access-bn22l\") pod \"glance-operator-controller-manager-68b95954c9-l8cvk\" (UID: \"0067c9ac-5dfc-4e0d-b316-161e02698ffd\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-l8cvk" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.077848 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vl87j\" (UniqueName: \"kubernetes.io/projected/bea08559-78a5-4287-85e8-a83768d94670-kube-api-access-vl87j\") pod \"heat-operator-controller-manager-774b86978c-bgn7g\" (UID: \"bea08559-78a5-4287-85e8-a83768d94670\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-bgn7g" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.088569 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phdb6"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.090020 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rvq7d"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.091142 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rvq7d" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.091556 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phdb6" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.099344 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-s2tc7" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.099375 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-nwsck"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.103387 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nwsck" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.099351 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-t7fhj" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.110108 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-srvpt" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.110907 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-l8cvk" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.114359 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bd656299-f7da-4ca8-aee9-25c389243cc9-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-j7gmp\" (UID: \"bd656299-f7da-4ca8-aee9-25c389243cc9\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp" Nov 24 13:35:44 crc kubenswrapper[5039]: E1124 13:35:44.114660 5039 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 24 13:35:44 crc kubenswrapper[5039]: E1124 13:35:44.114894 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd656299-f7da-4ca8-aee9-25c389243cc9-cert podName:bd656299-f7da-4ca8-aee9-25c389243cc9 nodeName:}" failed. No retries permitted until 2025-11-24 13:35:44.614874458 +0000 UTC m=+1057.053998958 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/bd656299-f7da-4ca8-aee9-25c389243cc9-cert") pod "infra-operator-controller-manager-d5cc86f4b-j7gmp" (UID: "bd656299-f7da-4ca8-aee9-25c389243cc9") : secret "infra-operator-webhook-server-cert" not found Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.115044 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpzrs\" (UniqueName: \"kubernetes.io/projected/bd656299-f7da-4ca8-aee9-25c389243cc9-kube-api-access-mpzrs\") pod \"infra-operator-controller-manager-d5cc86f4b-j7gmp\" (UID: \"bd656299-f7da-4ca8-aee9-25c389243cc9\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.115157 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4s28\" (UniqueName: \"kubernetes.io/projected/35f14195-18aa-433d-8705-1aa24a8a1818-kube-api-access-z4s28\") pod \"ironic-operator-controller-manager-5bfcdc958c-68gjk\" (UID: \"35f14195-18aa-433d-8705-1aa24a8a1818\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-68gjk" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.115280 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6wf4\" (UniqueName: \"kubernetes.io/projected/29991995-423a-42c0-ae52-2b3c160a3e0c-kube-api-access-d6wf4\") pod \"keystone-operator-controller-manager-748dc6576f-2lmlh\" (UID: \"29991995-423a-42c0-ae52-2b3c160a3e0c\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2lmlh" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.115409 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-gh64t\" (UniqueName: \"kubernetes.io/projected/979a5bac-57c9-4d42-9af6-11228e980f7f-kube-api-access-gh64t\") pod \"manila-operator-controller-manager-58bb8d67cc-nng76\" (UID: \"979a5bac-57c9-4d42-9af6-11228e980f7f\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-nng76" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.124398 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bgn7g" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.132042 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phdb6"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.142256 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4s28\" (UniqueName: \"kubernetes.io/projected/35f14195-18aa-433d-8705-1aa24a8a1818-kube-api-access-z4s28\") pod \"ironic-operator-controller-manager-5bfcdc958c-68gjk\" (UID: \"35f14195-18aa-433d-8705-1aa24a8a1818\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-68gjk" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.149201 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rvq7d"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.154353 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-69pxs" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.163037 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpzrs\" (UniqueName: \"kubernetes.io/projected/bd656299-f7da-4ca8-aee9-25c389243cc9-kube-api-access-mpzrs\") pod \"infra-operator-controller-manager-d5cc86f4b-j7gmp\" (UID: \"bd656299-f7da-4ca8-aee9-25c389243cc9\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.184451 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-nwsck"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.192473 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-88sqn"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.196855 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-88sqn" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.197437 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-68gjk" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.201545 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-9v7s6" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.218052 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gh64t\" (UniqueName: \"kubernetes.io/projected/979a5bac-57c9-4d42-9af6-11228e980f7f-kube-api-access-gh64t\") pod \"manila-operator-controller-manager-58bb8d67cc-nng76\" (UID: \"979a5bac-57c9-4d42-9af6-11228e980f7f\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-nng76" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.218118 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ht55n\" (UniqueName: \"kubernetes.io/projected/3c29a4a4-1d0c-4a1f-a4b5-a67cb564707a-kube-api-access-ht55n\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-phdb6\" (UID: \"3c29a4a4-1d0c-4a1f-a4b5-a67cb564707a\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phdb6" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.218183 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnsbw\" (UniqueName: \"kubernetes.io/projected/98b88919-04d2-4c01-b45a-dd72afbbe179-kube-api-access-wnsbw\") pod \"nova-operator-controller-manager-79556f57fc-nwsck\" (UID: \"98b88919-04d2-4c01-b45a-dd72afbbe179\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nwsck" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.218256 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bsrqr\" (UniqueName: \"kubernetes.io/projected/edf2350a-f77f-45ec-87c1-35f7b38ddcb3-kube-api-access-bsrqr\") pod \"neutron-operator-controller-manager-7c57c8bbc4-rvq7d\" (UID: \"edf2350a-f77f-45ec-87c1-35f7b38ddcb3\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rvq7d" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.218325 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6wf4\" (UniqueName: \"kubernetes.io/projected/29991995-423a-42c0-ae52-2b3c160a3e0c-kube-api-access-d6wf4\") pod \"keystone-operator-controller-manager-748dc6576f-2lmlh\" (UID: \"29991995-423a-42c0-ae52-2b3c160a3e0c\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2lmlh" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.223568 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.226753 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.229713 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-dwq4w" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.230454 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.234405 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-88sqn"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.268100 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6wf4\" (UniqueName: \"kubernetes.io/projected/29991995-423a-42c0-ae52-2b3c160a3e0c-kube-api-access-d6wf4\") pod \"keystone-operator-controller-manager-748dc6576f-2lmlh\" (UID: \"29991995-423a-42c0-ae52-2b3c160a3e0c\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2lmlh" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.272523 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gh64t\" (UniqueName: \"kubernetes.io/projected/979a5bac-57c9-4d42-9af6-11228e980f7f-kube-api-access-gh64t\") pod \"manila-operator-controller-manager-58bb8d67cc-nng76\" (UID: \"979a5bac-57c9-4d42-9af6-11228e980f7f\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-nng76" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.276344 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.298775 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-ft57m"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.309845 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-ft57m" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.311907 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-4fcwn" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.325465 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3731fd87-4c6a-4fb0-a3d5-cf48e76a5448-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb\" (UID: \"3731fd87-4c6a-4fb0-a3d5-cf48e76a5448\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.325574 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ht55n\" (UniqueName: \"kubernetes.io/projected/3c29a4a4-1d0c-4a1f-a4b5-a67cb564707a-kube-api-access-ht55n\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-phdb6\" (UID: \"3c29a4a4-1d0c-4a1f-a4b5-a67cb564707a\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phdb6" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.325605 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nss9c\" (UniqueName: \"kubernetes.io/projected/3731fd87-4c6a-4fb0-a3d5-cf48e76a5448-kube-api-access-nss9c\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb\" (UID: \"3731fd87-4c6a-4fb0-a3d5-cf48e76a5448\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.325628 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djklx\" (UniqueName: \"kubernetes.io/projected/e97ae0ee-d044-4b9d-a371-eec59a5ff932-kube-api-access-djklx\") pod \"octavia-operator-controller-manager-fd75fd47d-88sqn\" (UID: \"e97ae0ee-d044-4b9d-a371-eec59a5ff932\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-88sqn" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.325650 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnsbw\" (UniqueName: \"kubernetes.io/projected/98b88919-04d2-4c01-b45a-dd72afbbe179-kube-api-access-wnsbw\") pod \"nova-operator-controller-manager-79556f57fc-nwsck\" (UID: \"98b88919-04d2-4c01-b45a-dd72afbbe179\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nwsck" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.325708 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bsrqr\" (UniqueName: \"kubernetes.io/projected/edf2350a-f77f-45ec-87c1-35f7b38ddcb3-kube-api-access-bsrqr\") pod \"neutron-operator-controller-manager-7c57c8bbc4-rvq7d\" (UID: \"edf2350a-f77f-45ec-87c1-35f7b38ddcb3\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rvq7d" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.355315 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bsrqr\" (UniqueName: \"kubernetes.io/projected/edf2350a-f77f-45ec-87c1-35f7b38ddcb3-kube-api-access-bsrqr\") pod \"neutron-operator-controller-manager-7c57c8bbc4-rvq7d\" (UID: \"edf2350a-f77f-45ec-87c1-35f7b38ddcb3\") " 
pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rvq7d" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.360342 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnsbw\" (UniqueName: \"kubernetes.io/projected/98b88919-04d2-4c01-b45a-dd72afbbe179-kube-api-access-wnsbw\") pod \"nova-operator-controller-manager-79556f57fc-nwsck\" (UID: \"98b88919-04d2-4c01-b45a-dd72afbbe179\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nwsck" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.365654 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ht55n\" (UniqueName: \"kubernetes.io/projected/3c29a4a4-1d0c-4a1f-a4b5-a67cb564707a-kube-api-access-ht55n\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-phdb6\" (UID: \"3c29a4a4-1d0c-4a1f-a4b5-a67cb564707a\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phdb6" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.375959 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-hwn8q"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.387372 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-qzwtb"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.389759 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-hwn8q" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.398336 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-qzwtb"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.398420 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-qzwtb" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.399854 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-6jb7g" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.400098 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-tfkfx" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.408625 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-ft57m"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.426626 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3731fd87-4c6a-4fb0-a3d5-cf48e76a5448-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb\" (UID: \"3731fd87-4c6a-4fb0-a3d5-cf48e76a5448\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.426715 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nss9c\" (UniqueName: \"kubernetes.io/projected/3731fd87-4c6a-4fb0-a3d5-cf48e76a5448-kube-api-access-nss9c\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb\" (UID: \"3731fd87-4c6a-4fb0-a3d5-cf48e76a5448\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.426765 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djklx\" (UniqueName: \"kubernetes.io/projected/e97ae0ee-d044-4b9d-a371-eec59a5ff932-kube-api-access-djklx\") pod \"octavia-operator-controller-manager-fd75fd47d-88sqn\" (UID: \"e97ae0ee-d044-4b9d-a371-eec59a5ff932\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-88sqn" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.426834 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b84fg\" (UniqueName: \"kubernetes.io/projected/058dcaa2-f18f-4eff-bfd1-d290a8fd36a1-kube-api-access-b84fg\") pod \"ovn-operator-controller-manager-66cf5c67ff-ft57m\" (UID: \"058dcaa2-f18f-4eff-bfd1-d290a8fd36a1\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-ft57m" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.427836 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9t58m\" (UniqueName: \"kubernetes.io/projected/865f4099-70b9-45a1-9bcd-c92882c9aab1-kube-api-access-9t58m\") pod \"placement-operator-controller-manager-5db546f9d9-hwn8q\" (UID: \"865f4099-70b9-45a1-9bcd-c92882c9aab1\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-hwn8q" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.427931 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gcnnl\" (UniqueName: \"kubernetes.io/projected/90ff7526-7243-45b2-afaa-ee39dff42b46-kube-api-access-gcnnl\") pod \"swift-operator-controller-manager-6fdc4fcf86-qzwtb\" (UID: \"90ff7526-7243-45b2-afaa-ee39dff42b46\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-qzwtb" Nov 24 13:35:44 crc 
kubenswrapper[5039]: E1124 13:35:44.429955 5039 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 24 13:35:44 crc kubenswrapper[5039]: E1124 13:35:44.430021 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3731fd87-4c6a-4fb0-a3d5-cf48e76a5448-cert podName:3731fd87-4c6a-4fb0-a3d5-cf48e76a5448 nodeName:}" failed. No retries permitted until 2025-11-24 13:35:44.930001455 +0000 UTC m=+1057.369126035 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/3731fd87-4c6a-4fb0-a3d5-cf48e76a5448-cert") pod "openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb" (UID: "3731fd87-4c6a-4fb0-a3d5-cf48e76a5448") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.442857 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2lmlh" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.461848 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-hwn8q"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.471663 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djklx\" (UniqueName: \"kubernetes.io/projected/e97ae0ee-d044-4b9d-a371-eec59a5ff932-kube-api-access-djklx\") pod \"octavia-operator-controller-manager-fd75fd47d-88sqn\" (UID: \"e97ae0ee-d044-4b9d-a371-eec59a5ff932\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-88sqn" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.473664 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-nng76" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.496711 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nss9c\" (UniqueName: \"kubernetes.io/projected/3731fd87-4c6a-4fb0-a3d5-cf48e76a5448-kube-api-access-nss9c\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb\" (UID: \"3731fd87-4c6a-4fb0-a3d5-cf48e76a5448\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.517990 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rvq7d" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.530313 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b84fg\" (UniqueName: \"kubernetes.io/projected/058dcaa2-f18f-4eff-bfd1-d290a8fd36a1-kube-api-access-b84fg\") pod \"ovn-operator-controller-manager-66cf5c67ff-ft57m\" (UID: \"058dcaa2-f18f-4eff-bfd1-d290a8fd36a1\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-ft57m" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.530577 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9t58m\" (UniqueName: \"kubernetes.io/projected/865f4099-70b9-45a1-9bcd-c92882c9aab1-kube-api-access-9t58m\") pod \"placement-operator-controller-manager-5db546f9d9-hwn8q\" (UID: \"865f4099-70b9-45a1-9bcd-c92882c9aab1\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-hwn8q" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.530675 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gcnnl\" (UniqueName: \"kubernetes.io/projected/90ff7526-7243-45b2-afaa-ee39dff42b46-kube-api-access-gcnnl\") pod \"swift-operator-controller-manager-6fdc4fcf86-qzwtb\" (UID: \"90ff7526-7243-45b2-afaa-ee39dff42b46\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-qzwtb" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.531304 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-bf6985ffc-g86nb"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.556773 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-bf6985ffc-g86nb" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.568988 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-zn6gb" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.572074 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phdb6" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.574126 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9t58m\" (UniqueName: \"kubernetes.io/projected/865f4099-70b9-45a1-9bcd-c92882c9aab1-kube-api-access-9t58m\") pod \"placement-operator-controller-manager-5db546f9d9-hwn8q\" (UID: \"865f4099-70b9-45a1-9bcd-c92882c9aab1\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-hwn8q" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.597699 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b84fg\" (UniqueName: \"kubernetes.io/projected/058dcaa2-f18f-4eff-bfd1-d290a8fd36a1-kube-api-access-b84fg\") pod \"ovn-operator-controller-manager-66cf5c67ff-ft57m\" (UID: \"058dcaa2-f18f-4eff-bfd1-d290a8fd36a1\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-ft57m" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.606379 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gcnnl\" (UniqueName: \"kubernetes.io/projected/90ff7526-7243-45b2-afaa-ee39dff42b46-kube-api-access-gcnnl\") pod \"swift-operator-controller-manager-6fdc4fcf86-qzwtb\" (UID: \"90ff7526-7243-45b2-afaa-ee39dff42b46\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-qzwtb" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.626590 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nwsck" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.634019 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bd656299-f7da-4ca8-aee9-25c389243cc9-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-j7gmp\" (UID: \"bd656299-f7da-4ca8-aee9-25c389243cc9\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp" Nov 24 13:35:44 crc kubenswrapper[5039]: E1124 13:35:44.634234 5039 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 24 13:35:44 crc kubenswrapper[5039]: E1124 13:35:44.634302 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd656299-f7da-4ca8-aee9-25c389243cc9-cert podName:bd656299-f7da-4ca8-aee9-25c389243cc9 nodeName:}" failed. No retries permitted until 2025-11-24 13:35:45.634284453 +0000 UTC m=+1058.073408953 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/bd656299-f7da-4ca8-aee9-25c389243cc9-cert") pod "infra-operator-controller-manager-d5cc86f4b-j7gmp" (UID: "bd656299-f7da-4ca8-aee9-25c389243cc9") : secret "infra-operator-webhook-server-cert" not found Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.636107 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-88sqn" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.653208 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-bf6985ffc-g86nb"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.681090 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-ft57m" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.743164 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69slr\" (UniqueName: \"kubernetes.io/projected/7f6bb6a1-8df6-4d15-8d27-a5bbc28b9b31-kube-api-access-69slr\") pod \"telemetry-operator-controller-manager-bf6985ffc-g86nb\" (UID: \"7f6bb6a1-8df6-4d15-8d27-a5bbc28b9b31\") " pod="openstack-operators/telemetry-operator-controller-manager-bf6985ffc-g86nb" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.770896 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-hwn8q" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.782562 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-qk6bz"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.783889 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cb74df96-qk6bz" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.791922 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-flj4t" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.794073 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-qk6bz"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.797262 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-qzwtb" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.844818 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69slr\" (UniqueName: \"kubernetes.io/projected/7f6bb6a1-8df6-4d15-8d27-a5bbc28b9b31-kube-api-access-69slr\") pod \"telemetry-operator-controller-manager-bf6985ffc-g86nb\" (UID: \"7f6bb6a1-8df6-4d15-8d27-a5bbc28b9b31\") " pod="openstack-operators/telemetry-operator-controller-manager-bf6985ffc-g86nb" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.867554 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-kg5jg"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.869143 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-864885998-kg5jg" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.889115 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-vkrqb" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.890580 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69slr\" (UniqueName: \"kubernetes.io/projected/7f6bb6a1-8df6-4d15-8d27-a5bbc28b9b31-kube-api-access-69slr\") pod \"telemetry-operator-controller-manager-bf6985ffc-g86nb\" (UID: \"7f6bb6a1-8df6-4d15-8d27-a5bbc28b9b31\") " pod="openstack-operators/telemetry-operator-controller-manager-bf6985ffc-g86nb" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.906564 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-kg5jg"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.950228 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4zhk\" (UniqueName: \"kubernetes.io/projected/6cbb9e3e-f545-4d83-aee4-8e122c54437c-kube-api-access-t4zhk\") pod \"test-operator-controller-manager-5cb74df96-qk6bz\" (UID: \"6cbb9e3e-f545-4d83-aee4-8e122c54437c\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-qk6bz" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.950303 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3731fd87-4c6a-4fb0-a3d5-cf48e76a5448-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb\" (UID: \"3731fd87-4c6a-4fb0-a3d5-cf48e76a5448\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.950368 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nf9l\" (UniqueName: \"kubernetes.io/projected/ab34fb1d-70af-4438-86c7-3856f1733097-kube-api-access-4nf9l\") pod \"watcher-operator-controller-manager-864885998-kg5jg\" (UID: \"ab34fb1d-70af-4438-86c7-3856f1733097\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-kg5jg" Nov 24 13:35:44 crc kubenswrapper[5039]: E1124 13:35:44.950518 5039 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 24 13:35:44 crc kubenswrapper[5039]: E1124 13:35:44.950563 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3731fd87-4c6a-4fb0-a3d5-cf48e76a5448-cert podName:3731fd87-4c6a-4fb0-a3d5-cf48e76a5448 nodeName:}" failed. No retries permitted until 2025-11-24 13:35:45.950547888 +0000 UTC m=+1058.389672388 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/3731fd87-4c6a-4fb0-a3d5-cf48e76a5448-cert") pod "openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb" (UID: "3731fd87-4c6a-4fb0-a3d5-cf48e76a5448") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.952741 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.954041 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.956798 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-bf6985ffc-g86nb" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.959381 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.965717 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.965773 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-pnq4t" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.965827 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.984894 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-j9qqq"] Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.987989 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-j9qqq" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.991058 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-krbft" Nov 24 13:35:44 crc kubenswrapper[5039]: I1124 13:35:44.991170 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-j9qqq"] Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.002430 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-6nwfx"] Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.032884 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-sgqwz"] Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.051739 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4zhk\" (UniqueName: \"kubernetes.io/projected/6cbb9e3e-f545-4d83-aee4-8e122c54437c-kube-api-access-t4zhk\") pod \"test-operator-controller-manager-5cb74df96-qk6bz\" (UID: \"6cbb9e3e-f545-4d83-aee4-8e122c54437c\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-qk6bz" Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.051855 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-webhook-certs\") pod \"openstack-operator-controller-manager-7b5fb95979-n45b6\" (UID: \"0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a\") " pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6" Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.051896 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-metrics-certs\") pod \"openstack-operator-controller-manager-7b5fb95979-n45b6\" (UID: \"0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a\") " pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6" Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.051941 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nllmb\" (UniqueName: \"kubernetes.io/projected/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-kube-api-access-nllmb\") pod \"openstack-operator-controller-manager-7b5fb95979-n45b6\" (UID: \"0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a\") " pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6" Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.052037 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zk8wl\" (UniqueName: \"kubernetes.io/projected/076a99d2-27b3-4d08-bdcc-876e1dec4f5f-kube-api-access-zk8wl\") pod \"rabbitmq-cluster-operator-manager-668c99d594-j9qqq\" (UID: \"076a99d2-27b3-4d08-bdcc-876e1dec4f5f\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-j9qqq" Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.052095 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nf9l\" (UniqueName: \"kubernetes.io/projected/ab34fb1d-70af-4438-86c7-3856f1733097-kube-api-access-4nf9l\") pod 
\"watcher-operator-controller-manager-864885998-kg5jg\" (UID: \"ab34fb1d-70af-4438-86c7-3856f1733097\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-kg5jg" Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.077721 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4zhk\" (UniqueName: \"kubernetes.io/projected/6cbb9e3e-f545-4d83-aee4-8e122c54437c-kube-api-access-t4zhk\") pod \"test-operator-controller-manager-5cb74df96-qk6bz\" (UID: \"6cbb9e3e-f545-4d83-aee4-8e122c54437c\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-qk6bz" Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.078564 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nf9l\" (UniqueName: \"kubernetes.io/projected/ab34fb1d-70af-4438-86c7-3856f1733097-kube-api-access-4nf9l\") pod \"watcher-operator-controller-manager-864885998-kg5jg\" (UID: \"ab34fb1d-70af-4438-86c7-3856f1733097\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-kg5jg" Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.097610 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-whklh"] Nov 24 13:35:45 crc kubenswrapper[5039]: W1124 13:35:45.122547 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb913096c_9ece_4755_9545_0116fbc53123.slice/crio-97d12caea7b12efe7788ad6d2b0379503facf5262da9912a84ff8158fc5e9493 WatchSource:0}: Error finding container 97d12caea7b12efe7788ad6d2b0379503facf5262da9912a84ff8158fc5e9493: Status 404 returned error can't find the container with id 97d12caea7b12efe7788ad6d2b0379503facf5262da9912a84ff8158fc5e9493 Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.150803 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cb74df96-qk6bz"
Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.154773 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nllmb\" (UniqueName: \"kubernetes.io/projected/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-kube-api-access-nllmb\") pod \"openstack-operator-controller-manager-7b5fb95979-n45b6\" (UID: \"0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a\") " pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6"
Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.154811 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zk8wl\" (UniqueName: \"kubernetes.io/projected/076a99d2-27b3-4d08-bdcc-876e1dec4f5f-kube-api-access-zk8wl\") pod \"rabbitmq-cluster-operator-manager-668c99d594-j9qqq\" (UID: \"076a99d2-27b3-4d08-bdcc-876e1dec4f5f\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-j9qqq"
Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.154916 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-webhook-certs\") pod \"openstack-operator-controller-manager-7b5fb95979-n45b6\" (UID: \"0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a\") " pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6"
Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.154939 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-metrics-certs\") pod \"openstack-operator-controller-manager-7b5fb95979-n45b6\" (UID: \"0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a\") " pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6"
Nov 24 13:35:45 crc kubenswrapper[5039]: E1124 13:35:45.155063 5039 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 24 13:35:45 crc kubenswrapper[5039]: E1124 13:35:45.155111 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-metrics-certs podName:0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a nodeName:}" failed. No retries permitted until 2025-11-24 13:35:45.655096153 +0000 UTC m=+1058.094220653 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-metrics-certs") pod "openstack-operator-controller-manager-7b5fb95979-n45b6" (UID: "0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a") : secret "metrics-server-cert" not found
Nov 24 13:35:45 crc kubenswrapper[5039]: E1124 13:35:45.155590 5039 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 24 13:35:45 crc kubenswrapper[5039]: E1124 13:35:45.155615 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-webhook-certs podName:0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a nodeName:}" failed. No retries permitted until 2025-11-24 13:35:45.655607686 +0000 UTC m=+1058.094732186 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-webhook-certs") pod "openstack-operator-controller-manager-7b5fb95979-n45b6" (UID: "0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a") : secret "webhook-server-cert" not found
Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.199278 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zk8wl\" (UniqueName: \"kubernetes.io/projected/076a99d2-27b3-4d08-bdcc-876e1dec4f5f-kube-api-access-zk8wl\") pod \"rabbitmq-cluster-operator-manager-668c99d594-j9qqq\" (UID: \"076a99d2-27b3-4d08-bdcc-876e1dec4f5f\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-j9qqq"
Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.203201 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nllmb\" (UniqueName: \"kubernetes.io/projected/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-kube-api-access-nllmb\") pod \"openstack-operator-controller-manager-7b5fb95979-n45b6\" (UID: \"0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a\") " pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6"
Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.242244 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-864885998-kg5jg"
Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.300526 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-j9qqq"
Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.412748 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-sgqwz" event={"ID":"b913096c-9ece-4755-9545-0116fbc53123","Type":"ContainerStarted","Data":"97d12caea7b12efe7788ad6d2b0379503facf5262da9912a84ff8158fc5e9493"}
Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.415176 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-whklh" event={"ID":"396e7965-a743-4028-989b-e3610abb5a3a","Type":"ContainerStarted","Data":"505e3e2dddbfebbd5e76642fd2674b130fb066e933e7f341d3a7ccd53c1c107d"}
Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.418741 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-6nwfx" event={"ID":"99f1711f-1dd9-471d-9a2d-8c6e0a46fb0d","Type":"ContainerStarted","Data":"965fb0fb3069b2bfb225d89008a651ddf05c039f4b745ae9574e01b0a5d7bbc5"}
Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.670351 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-webhook-certs\") pod \"openstack-operator-controller-manager-7b5fb95979-n45b6\" (UID: \"0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a\") " pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6"
Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.670703 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-metrics-certs\") pod \"openstack-operator-controller-manager-7b5fb95979-n45b6\" (UID: \"0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a\") " pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6"
Nov 24 13:35:45 crc kubenswrapper[5039]: E1124 13:35:45.670584 5039 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.670827 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bd656299-f7da-4ca8-aee9-25c389243cc9-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-j7gmp\" (UID: \"bd656299-f7da-4ca8-aee9-25c389243cc9\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp"
Nov 24 13:35:45 crc kubenswrapper[5039]: E1124 13:35:45.670869 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-webhook-certs podName:0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a nodeName:}" failed. No retries permitted until 2025-11-24 13:35:46.67083575 +0000 UTC m=+1059.109960290 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-webhook-certs") pod "openstack-operator-controller-manager-7b5fb95979-n45b6" (UID: "0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a") : secret "webhook-server-cert" not found
Nov 24 13:35:45 crc kubenswrapper[5039]: E1124 13:35:45.671009 5039 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 24 13:35:45 crc kubenswrapper[5039]: E1124 13:35:45.671041 5039 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 24 13:35:45 crc kubenswrapper[5039]: E1124 13:35:45.671083 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-metrics-certs podName:0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a nodeName:}" failed. No retries permitted until 2025-11-24 13:35:46.671065395 +0000 UTC m=+1059.110189895 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-metrics-certs") pod "openstack-operator-controller-manager-7b5fb95979-n45b6" (UID: "0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a") : secret "metrics-server-cert" not found
Nov 24 13:35:45 crc kubenswrapper[5039]: E1124 13:35:45.671100 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd656299-f7da-4ca8-aee9-25c389243cc9-cert podName:bd656299-f7da-4ca8-aee9-25c389243cc9 nodeName:}" failed. No retries permitted until 2025-11-24 13:35:47.671092896 +0000 UTC m=+1060.110217396 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/bd656299-f7da-4ca8-aee9-25c389243cc9-cert") pod "infra-operator-controller-manager-d5cc86f4b-j7gmp" (UID: "bd656299-f7da-4ca8-aee9-25c389243cc9") : secret "infra-operator-webhook-server-cert" not found
Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.696568 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-l8cvk"]
Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.702439 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-69pxs"]
Nov 24 13:35:45 crc kubenswrapper[5039]: W1124 13:35:45.708228 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeee87172_9357_412c_8eb2_7df01649f1d0.slice/crio-fa5675b921af1ef363fb53390dd63f5972236e4c162228fcfe4a42979a94c474 WatchSource:0}: Error finding container fa5675b921af1ef363fb53390dd63f5972236e4c162228fcfe4a42979a94c474: Status 404 returned error can't find the container with id fa5675b921af1ef363fb53390dd63f5972236e4c162228fcfe4a42979a94c474
Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.724768 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-68gjk"]
Nov 24 13:35:45 crc kubenswrapper[5039]: W1124 13:35:45.727777 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod35f14195_18aa_433d_8705_1aa24a8a1818.slice/crio-191d25fdb9fba788ea3c0ad21505979c5522ca0517d36c7eb03b2e6c596e452a WatchSource:0}: Error finding container 191d25fdb9fba788ea3c0ad21505979c5522ca0517d36c7eb03b2e6c596e452a: Status 404 returned error can't find the container with id 191d25fdb9fba788ea3c0ad21505979c5522ca0517d36c7eb03b2e6c596e452a
Nov 24 13:35:45 crc kubenswrapper[5039]: W1124 13:35:45.735301 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbea08559_78a5_4287_85e8_a83768d94670.slice/crio-27461315dd4deeb64dce0226da58273ecfd115c628ec91414eddad252e708cf7 WatchSource:0}: Error finding container 27461315dd4deeb64dce0226da58273ecfd115c628ec91414eddad252e708cf7: Status 404 returned error can't find the container with id 27461315dd4deeb64dce0226da58273ecfd115c628ec91414eddad252e708cf7
Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.736820 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-bgn7g"]
Nov 24 13:35:45 crc kubenswrapper[5039]: I1124 13:35:45.976671 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3731fd87-4c6a-4fb0-a3d5-cf48e76a5448-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb\" (UID: \"3731fd87-4c6a-4fb0-a3d5-cf48e76a5448\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb"
Nov 24 13:35:45 crc kubenswrapper[5039]: E1124 13:35:45.976827 5039 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 24 13:35:45 crc kubenswrapper[5039]: E1124 13:35:45.976886 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3731fd87-4c6a-4fb0-a3d5-cf48e76a5448-cert podName:3731fd87-4c6a-4fb0-a3d5-cf48e76a5448 nodeName:}" failed. No retries permitted until 2025-11-24 13:35:47.976870606 +0000 UTC m=+1060.415995106 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/3731fd87-4c6a-4fb0-a3d5-cf48e76a5448-cert") pod "openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb" (UID: "3731fd87-4c6a-4fb0-a3d5-cf48e76a5448") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.012180 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-2lmlh"]
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.026108 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-bf6985ffc-g86nb"]
Nov 24 13:35:46 crc kubenswrapper[5039]: W1124 13:35:46.036262 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod29991995_423a_42c0_ae52_2b3c160a3e0c.slice/crio-32aaba3cc0fb48ac9511ecd2bae20198b3dd800137160a26d7a0554ee0ed05b1 WatchSource:0}: Error finding container 32aaba3cc0fb48ac9511ecd2bae20198b3dd800137160a26d7a0554ee0ed05b1: Status 404 returned error can't find the container with id 32aaba3cc0fb48ac9511ecd2bae20198b3dd800137160a26d7a0554ee0ed05b1
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.041808 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rvq7d"]
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.054960 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-nng76"]
Nov 24 13:35:46 crc kubenswrapper[5039]: W1124 13:35:46.063901 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3c29a4a4_1d0c_4a1f_a4b5_a67cb564707a.slice/crio-93c021a6140928da4db45e78f7f68c04c783ffc25af544cf5572ace961fa3502 WatchSource:0}: Error finding container 93c021a6140928da4db45e78f7f68c04c783ffc25af544cf5572ace961fa3502: Status 404 returned error can't find the container with id 93c021a6140928da4db45e78f7f68c04c783ffc25af544cf5572ace961fa3502
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.073115 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phdb6"]
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.089793 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-ft57m"]
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.274611 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-nwsck"]
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.284464 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-qk6bz"]
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.302252 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-kg5jg"]
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.318131 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-j9qqq"]
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.318175 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-88sqn"]
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.321536 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-hwn8q"]
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.327171 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-qzwtb"]
Nov 24 13:35:46 crc kubenswrapper[5039]: W1124 13:35:46.327253 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode97ae0ee_d044_4b9d_a371_eec59a5ff932.slice/crio-51a12945d85de524f01dac581736c2976deb3599e438959a3564621dc9bf215a WatchSource:0}: Error finding container 51a12945d85de524f01dac581736c2976deb3599e438959a3564621dc9bf215a: Status 404 returned error can't find the container with id 51a12945d85de524f01dac581736c2976deb3599e438959a3564621dc9bf215a
Nov 24 13:35:46 crc kubenswrapper[5039]: E1124 13:35:46.331119 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-djklx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-fd75fd47d-88sqn_openstack-operators(e97ae0ee-d044-4b9d-a371-eec59a5ff932): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 24 13:35:46 crc kubenswrapper[5039]: W1124 13:35:46.333386 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podab34fb1d_70af_4438_86c7_3856f1733097.slice/crio-4ac77a88c653ca93004856be72cf8e462024899daacfe2fc07285266e83c3792 WatchSource:0}: Error finding container 4ac77a88c653ca93004856be72cf8e462024899daacfe2fc07285266e83c3792: Status 404 returned error can't find the container with id 4ac77a88c653ca93004856be72cf8e462024899daacfe2fc07285266e83c3792
Nov 24 13:35:46 crc kubenswrapper[5039]: E1124 13:35:46.333624 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-djklx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-fd75fd47d-88sqn_openstack-operators(e97ae0ee-d044-4b9d-a371-eec59a5ff932): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 24 13:35:46 crc kubenswrapper[5039]: E1124 13:35:46.335417 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-88sqn" podUID="e97ae0ee-d044-4b9d-a371-eec59a5ff932"
Nov 24 13:35:46 crc kubenswrapper[5039]: E1124 13:35:46.338306 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4nf9l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-864885998-kg5jg_openstack-operators(ab34fb1d-70af-4438-86c7-3856f1733097): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 24 13:35:46 crc kubenswrapper[5039]: E1124 13:35:46.341005 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4nf9l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-864885998-kg5jg_openstack-operators(ab34fb1d-70af-4438-86c7-3856f1733097): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 24 13:35:46 crc kubenswrapper[5039]: E1124 13:35:46.342128 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-864885998-kg5jg" podUID="ab34fb1d-70af-4438-86c7-3856f1733097"
Nov 24 13:35:46 crc kubenswrapper[5039]: E1124 13:35:46.342844 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gcnnl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-6fdc4fcf86-qzwtb_openstack-operators(90ff7526-7243-45b2-afaa-ee39dff42b46): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 24 13:35:46 crc kubenswrapper[5039]: E1124 13:35:46.347475 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gcnnl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-6fdc4fcf86-qzwtb_openstack-operators(90ff7526-7243-45b2-afaa-ee39dff42b46): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 24 13:35:46 crc kubenswrapper[5039]: E1124 13:35:46.348818 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-t4zhk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cb74df96-qk6bz_openstack-operators(6cbb9e3e-f545-4d83-aee4-8e122c54437c): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 24 13:35:46 crc kubenswrapper[5039]: E1124 13:35:46.348897 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-qzwtb" podUID="90ff7526-7243-45b2-afaa-ee39dff42b46"
Nov 24 13:35:46 crc kubenswrapper[5039]: E1124 13:35:46.351347 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-t4zhk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cb74df96-qk6bz_openstack-operators(6cbb9e3e-f545-4d83-aee4-8e122c54437c): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 24 13:35:46 crc kubenswrapper[5039]: E1124 13:35:46.352796 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5cb74df96-qk6bz" podUID="6cbb9e3e-f545-4d83-aee4-8e122c54437c"
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.428479 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-88sqn" event={"ID":"e97ae0ee-d044-4b9d-a371-eec59a5ff932","Type":"ContainerStarted","Data":"51a12945d85de524f01dac581736c2976deb3599e438959a3564621dc9bf215a"}
Nov 24 13:35:46 crc kubenswrapper[5039]: E1124 13:35:46.431032 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-88sqn" podUID="e97ae0ee-d044-4b9d-a371-eec59a5ff932"
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.431156 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nwsck" event={"ID":"98b88919-04d2-4c01-b45a-dd72afbbe179","Type":"ContainerStarted","Data":"33a1347ddeab8793d82a1aa90a1a4f88d547bbc06e1c814bedc91d55ad21f9d8"}
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.434204 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-j9qqq" event={"ID":"076a99d2-27b3-4d08-bdcc-876e1dec4f5f","Type":"ContainerStarted","Data":"84575d0719514b67bf2b395f37aa09524bb038d0958771391f2064400669bada"}
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.435469 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phdb6" event={"ID":"3c29a4a4-1d0c-4a1f-a4b5-a67cb564707a","Type":"ContainerStarted","Data":"93c021a6140928da4db45e78f7f68c04c783ffc25af544cf5572ace961fa3502"}
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.437759 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bgn7g" event={"ID":"bea08559-78a5-4287-85e8-a83768d94670","Type":"ContainerStarted","Data":"27461315dd4deeb64dce0226da58273ecfd115c628ec91414eddad252e708cf7"}
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.439632 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2lmlh" event={"ID":"29991995-423a-42c0-ae52-2b3c160a3e0c","Type":"ContainerStarted","Data":"32aaba3cc0fb48ac9511ecd2bae20198b3dd800137160a26d7a0554ee0ed05b1"}
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.441637 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-l8cvk" event={"ID":"0067c9ac-5dfc-4e0d-b316-161e02698ffd","Type":"ContainerStarted","Data":"9360749a2bc546f2f08ea7006d58ca8b09f6d41ace953cd96953dfd99d6079c1"}
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.443683 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-bf6985ffc-g86nb" event={"ID":"7f6bb6a1-8df6-4d15-8d27-a5bbc28b9b31","Type":"ContainerStarted","Data":"628c58ef9492008b63b7ca02bcf1214f79b929a437c6eb8dba62ed755ddfabfe"}
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.454761 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-nng76" event={"ID":"979a5bac-57c9-4d42-9af6-11228e980f7f","Type":"ContainerStarted","Data":"caaaad5ba41fc28b77b6d0c087f47840bdbbebaad52575f263fd2d18dd465e97"}
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.456379 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-kg5jg" event={"ID":"ab34fb1d-70af-4438-86c7-3856f1733097","Type":"ContainerStarted","Data":"4ac77a88c653ca93004856be72cf8e462024899daacfe2fc07285266e83c3792"}
Nov 24 13:35:46 crc kubenswrapper[5039]: E1124 13:35:46.459488 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-864885998-kg5jg" podUID="ab34fb1d-70af-4438-86c7-3856f1733097"
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.460313 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-qzwtb" event={"ID":"90ff7526-7243-45b2-afaa-ee39dff42b46","Type":"ContainerStarted","Data":"f7e5e95f3853077c6b96e758d7c9cb817e5f03ca38f1bfdf5218eb80a79da5cd"}
Nov 24 13:35:46 crc kubenswrapper[5039]: E1124 13:35:46.462223 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-qzwtb" podUID="90ff7526-7243-45b2-afaa-ee39dff42b46"
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.471724 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rvq7d" event={"ID":"edf2350a-f77f-45ec-87c1-35f7b38ddcb3","Type":"ContainerStarted","Data":"79e7b7c30ff8a04cc1dd6eddf32896f850aab24543d90b2011492c43cff3d2df"}
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.479283 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-69pxs" event={"ID":"eee87172-9357-412c-8eb2-7df01649f1d0","Type":"ContainerStarted","Data":"fa5675b921af1ef363fb53390dd63f5972236e4c162228fcfe4a42979a94c474"}
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.480795 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-ft57m" event={"ID":"058dcaa2-f18f-4eff-bfd1-d290a8fd36a1","Type":"ContainerStarted","Data":"44eb598e2ed5f3359e9df39c4beeebf4dc9f959586550f635bd600ebde0e4b2f"}
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.483319 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-68gjk" event={"ID":"35f14195-18aa-433d-8705-1aa24a8a1818","Type":"ContainerStarted","Data":"191d25fdb9fba788ea3c0ad21505979c5522ca0517d36c7eb03b2e6c596e452a"}
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.491141 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-hwn8q" event={"ID":"865f4099-70b9-45a1-9bcd-c92882c9aab1","Type":"ContainerStarted","Data":"6fa4423f21d3bc6c1275ec4e61d1c7e2b70d3f2d37353aa3b88a2a893ef0bb56"}
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.496414 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-qk6bz" event={"ID":"6cbb9e3e-f545-4d83-aee4-8e122c54437c","Type":"ContainerStarted","Data":"8dd53dab95f4b7ec9dc7e67714ca393745e355bab3cec47ab9e7e7da413826fd"}
Nov 24 13:35:46 crc kubenswrapper[5039]: E1124 13:35:46.503562 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cb74df96-qk6bz" podUID="6cbb9e3e-f545-4d83-aee4-8e122c54437c"
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.694985 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-webhook-certs\") pod \"openstack-operator-controller-manager-7b5fb95979-n45b6\" (UID: \"0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a\") " pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6"
Nov 24 13:35:46 crc kubenswrapper[5039]: I1124 13:35:46.695060 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-metrics-certs\") pod \"openstack-operator-controller-manager-7b5fb95979-n45b6\" (UID: \"0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a\") " pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6"
Nov 24 13:35:46 crc kubenswrapper[5039]: E1124 13:35:46.695190 5039 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 24 13:35:46 crc kubenswrapper[5039]: E1124 13:35:46.695277 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-webhook-certs podName:0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a nodeName:}" failed. No retries permitted until 2025-11-24 13:35:48.69525634 +0000 UTC m=+1061.134380840 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-webhook-certs") pod "openstack-operator-controller-manager-7b5fb95979-n45b6" (UID: "0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a") : secret "webhook-server-cert" not found
Nov 24 13:35:46 crc kubenswrapper[5039]: E1124 13:35:46.698463 5039 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 24 13:35:46 crc kubenswrapper[5039]: E1124 13:35:46.698630 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-metrics-certs podName:0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a nodeName:}" failed. No retries permitted until 2025-11-24 13:35:48.698569921 +0000 UTC m=+1061.137694491 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-metrics-certs") pod "openstack-operator-controller-manager-7b5fb95979-n45b6" (UID: "0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a") : secret "metrics-server-cert" not found
Nov 24 13:35:47 crc kubenswrapper[5039]: E1124 13:35:47.514799 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-qzwtb" podUID="90ff7526-7243-45b2-afaa-ee39dff42b46"
Nov 24 13:35:47 crc kubenswrapper[5039]: E1124 13:35:47.514960 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-88sqn" podUID="e97ae0ee-d044-4b9d-a371-eec59a5ff932"
Nov 24 13:35:47 crc kubenswrapper[5039]: E1124 13:35:47.515072 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cb74df96-qk6bz" podUID="6cbb9e3e-f545-4d83-aee4-8e122c54437c"
Nov 24 13:35:47 crc kubenswrapper[5039]: E1124 13:35:47.515106 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-864885998-kg5jg" podUID="ab34fb1d-70af-4438-86c7-3856f1733097"
Nov 24 13:35:47 crc kubenswrapper[5039]: I1124 13:35:47.714906 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bd656299-f7da-4ca8-aee9-25c389243cc9-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-j7gmp\" (UID: \"bd656299-f7da-4ca8-aee9-25c389243cc9\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp"
Nov 24 13:35:47 crc kubenswrapper[5039]: I1124 13:35:47.727517 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bd656299-f7da-4ca8-aee9-25c389243cc9-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-j7gmp\" (UID: \"bd656299-f7da-4ca8-aee9-25c389243cc9\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp"
Nov 24 13:35:48 crc kubenswrapper[5039]: I1124 13:35:48.011785 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp"
Nov 24 13:35:48 crc kubenswrapper[5039]: I1124 13:35:48.020004 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3731fd87-4c6a-4fb0-a3d5-cf48e76a5448-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb\" (UID: \"3731fd87-4c6a-4fb0-a3d5-cf48e76a5448\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb"
Nov 24 13:35:48 crc kubenswrapper[5039]: I1124 13:35:48.038930 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3731fd87-4c6a-4fb0-a3d5-cf48e76a5448-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb\" (UID: \"3731fd87-4c6a-4fb0-a3d5-cf48e76a5448\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb"
Nov 24 13:35:48 crc kubenswrapper[5039]: I1124 13:35:48.257550 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb"
Nov 24 13:35:48 crc kubenswrapper[5039]: I1124 13:35:48.733101 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-webhook-certs\") pod \"openstack-operator-controller-manager-7b5fb95979-n45b6\" (UID: \"0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a\") " pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6"
Nov 24 13:35:48 crc kubenswrapper[5039]: I1124 13:35:48.733477 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-metrics-certs\") pod \"openstack-operator-controller-manager-7b5fb95979-n45b6\" (UID: \"0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a\") " pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6"
Nov 24 13:35:48 crc kubenswrapper[5039]: I1124 13:35:48.737802 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-webhook-certs\") pod \"openstack-operator-controller-manager-7b5fb95979-n45b6\" (UID: \"0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a\") " pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6"
Nov 24 13:35:48 crc kubenswrapper[5039]: I1124 13:35:48.739014 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a-metrics-certs\") pod \"openstack-operator-controller-manager-7b5fb95979-n45b6\" (UID: \"0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a\") " pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6"
Nov 24 13:35:48 crc kubenswrapper[5039]: I1124 13:35:48.851555 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6"
Nov 24 13:35:50 crc kubenswrapper[5039]: I1124 13:35:50.101030 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 13:35:50 crc kubenswrapper[5039]: I1124 13:35:50.101105 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 13:35:57 crc kubenswrapper[5039]: E1124 13:35:57.962851 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:5edd825a235f5784d9a65892763c5388c39df1731d0fcbf4ee33408b8c83ac96"
Nov 24 13:35:57 crc kubenswrapper[5039]: E1124 13:35:57.963453 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:5edd825a235f5784d9a65892763c5388c39df1731d0fcbf4ee33408b8c83ac96,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vl87j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-774b86978c-bgn7g_openstack-operators(bea08559-78a5-4287-85e8-a83768d94670): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 24 13:35:59 crc kubenswrapper[5039]: E1124 13:35:59.468060 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:d38faa9070da05487afdaa9e261ad39274c2ed862daf42efa460a040431f1991"
Nov 24 13:35:59 crc kubenswrapper[5039]: E1124 13:35:59.468270 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:d38faa9070da05487afdaa9e261ad39274c2ed862daf42efa460a040431f1991,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bn22l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-68b95954c9-l8cvk_openstack-operators(0067c9ac-5dfc-4e0d-b316-161e02698ffd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 24 13:35:59 crc kubenswrapper[5039]: E1124 13:35:59.917162 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:7b90521b9e9cb4eb43c2f1c3bf85dbd068d684315f4f705b07708dd078df9d04"
Nov 24 13:35:59 crc kubenswrapper[5039]: E1124 13:35:59.917686 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:7b90521b9e9cb4eb43c2f1c3bf85dbd068d684315f4f705b07708dd078df9d04,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ht55n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-cb6c4fdb7-phdb6_openstack-operators(3c29a4a4-1d0c-4a1f-a4b5-a67cb564707a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 24 13:36:01 crc kubenswrapper[5039]: E1124 13:36:01.002296 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b"
Nov 24 13:36:01 crc kubenswrapper[5039]: E1124 13:36:01.002469 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-b84fg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-66cf5c67ff-ft57m_openstack-operators(058dcaa2-f18f-4eff-bfd1-d290a8fd36a1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 24 13:36:02 crc kubenswrapper[5039]: E1124 13:36:02.764560 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7"
Nov 24 13:36:02 crc kubenswrapper[5039]: E1124 13:36:02.764778 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wnsbw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79556f57fc-nwsck_openstack-operators(98b88919-04d2-4c01-b45a-dd72afbbe179): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 24 13:36:03 crc kubenswrapper[5039]: E1124 13:36:03.178937 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.30:5001/openstack-k8s-operators/telemetry-operator:3457e680da4cff4bb0788c87b90a083a663f8ab1"
Nov 24 13:36:03 crc kubenswrapper[5039]: E1124 13:36:03.179341 5039 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.30:5001/openstack-k8s-operators/telemetry-operator:3457e680da4cff4bb0788c87b90a083a663f8ab1"
Nov 24 13:36:03 crc kubenswrapper[5039]: E1124 13:36:03.179551 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.30:5001/openstack-k8s-operators/telemetry-operator:3457e680da4cff4bb0788c87b90a083a663f8ab1,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-69slr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-bf6985ffc-g86nb_openstack-operators(7f6bb6a1-8df6-4d15-8d27-a5bbc28b9b31): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 13:36:03 crc kubenswrapper[5039]: E1124 13:36:03.601270 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Nov 24 13:36:03 crc kubenswrapper[5039]: E1124 13:36:03.601837 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zk8wl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-j9qqq_openstack-operators(076a99d2-27b3-4d08-bdcc-876e1dec4f5f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 24 13:36:03 crc kubenswrapper[5039]: E1124 13:36:03.603121 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-j9qqq" podUID="076a99d2-27b3-4d08-bdcc-876e1dec4f5f"
Nov 24 13:36:03 crc kubenswrapper[5039]: E1124 13:36:03.897947 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-j9qqq" podUID="076a99d2-27b3-4d08-bdcc-876e1dec4f5f"
Nov 24 13:36:03 crc kubenswrapper[5039]: I1124 13:36:03.990685 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb"]
Nov 24 13:36:04 crc kubenswrapper[5039]: I1124 13:36:04.065211 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6"]
Nov 24 13:36:05 crc kubenswrapper[5039]: W1124 13:36:05.969096 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0f3b98cc_e2e5_45f3_8fc2_94cc1a2ce58a.slice/crio-888f7913f05916b9be03fcdd09a93e984a26e1e8d6d701352dd4b4a005554652 WatchSource:0}: Error finding container 888f7913f05916b9be03fcdd09a93e984a26e1e8d6d701352dd4b4a005554652: Status 404 returned error can't find the container with id 888f7913f05916b9be03fcdd09a93e984a26e1e8d6d701352dd4b4a005554652
Nov 24 13:36:06 crc kubenswrapper[5039]: I1124 13:36:06.387480 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp"]
Nov 24 13:36:06 crc kubenswrapper[5039]: I1124 13:36:06.916140 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6" event={"ID":"0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a","Type":"ContainerStarted","Data":"888f7913f05916b9be03fcdd09a93e984a26e1e8d6d701352dd4b4a005554652"}
Nov 24 13:36:06 crc kubenswrapper[5039]: I1124 13:36:06.917715 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-6nwfx" event={"ID":"99f1711f-1dd9-471d-9a2d-8c6e0a46fb0d","Type":"ContainerStarted","Data":"42bdbbbac8a197fc4d0b912b30cce784101899ea6f3ea6f0076ff8ce337bd901"}
Nov 24 13:36:06 crc kubenswrapper[5039]: I1124 13:36:06.918935 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb" event={"ID":"3731fd87-4c6a-4fb0-a3d5-cf48e76a5448","Type":"ContainerStarted","Data":"27dff02dccff4620941bc3e0f2f44fa157fb4bf13539f067dff4afeb7142c72c"}
Nov 24 13:36:07 crc kubenswrapper[5039]: W1124 13:36:07.894814 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbd656299_f7da_4ca8_aee9_25c389243cc9.slice/crio-6fbc26fe05d0342fa0032cbbd225ebd96dc26291a7b01143dd2f0e1e8fccf49e WatchSource:0}: Error finding container 6fbc26fe05d0342fa0032cbbd225ebd96dc26291a7b01143dd2f0e1e8fccf49e: Status 404 returned error can't find the container with id 6fbc26fe05d0342fa0032cbbd225ebd96dc26291a7b01143dd2f0e1e8fccf49e
Nov 24 13:36:07 crc kubenswrapper[5039]: I1124 13:36:07.926806 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp" event={"ID":"bd656299-f7da-4ca8-aee9-25c389243cc9","Type":"ContainerStarted","Data":"6fbc26fe05d0342fa0032cbbd225ebd96dc26291a7b01143dd2f0e1e8fccf49e"}
Nov 24 13:36:09 crc kubenswrapper[5039]: I1124 13:36:09.942435 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2lmlh" event={"ID":"29991995-423a-42c0-ae52-2b3c160a3e0c","Type":"ContainerStarted","Data":"0dc2f8126b0ed3de1303fb9af79d53e37c0cdd46a80dbdaf7050b4de3e64ace7"}
Nov 24 13:36:09 crc kubenswrapper[5039]: I1124 13:36:09.943869 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-68gjk" event={"ID":"35f14195-18aa-433d-8705-1aa24a8a1818","Type":"ContainerStarted","Data":"216ca498d93d27cbe376cb9259c93a4641352feb5bd6db78f0132e70521fe6c4"}
Nov 24 13:36:09 crc kubenswrapper[5039]: I1124 13:36:09.945837 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-hwn8q" event={"ID":"865f4099-70b9-45a1-9bcd-c92882c9aab1","Type":"ContainerStarted","Data":"4ac035f8c20373ae5e497591594e1c2cda3a4edd6e5c4efc8033bb55fbd8cd49"}
Nov 24 13:36:09 crc kubenswrapper[5039]: I1124 13:36:09.947621 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-sgqwz" event={"ID":"b913096c-9ece-4755-9545-0116fbc53123","Type":"ContainerStarted","Data":"e48fd587ca8b0dbb34cce5c2d1b2fc6f090662e7fe4fd8e84be8927203055240"}
Nov 24 13:36:09 crc kubenswrapper[5039]: I1124 13:36:09.949106 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-whklh" event={"ID":"396e7965-a743-4028-989b-e3610abb5a3a","Type":"ContainerStarted","Data":"55464fbf1b396e9f9ec0f4b9044824f1ce66b6fe0373ac2c48c42c0d51c31bdc"}
Nov 24 13:36:10 crc kubenswrapper[5039]: I1124 13:36:10.964543 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-88sqn" event={"ID":"e97ae0ee-d044-4b9d-a371-eec59a5ff932","Type":"ContainerStarted","Data":"91015d2d34fbf1085503423932a1732c47a975ec5456ae9f402fa036a7b23d08"}
Nov 24 13:36:10 crc kubenswrapper[5039]: I1124 13:36:10.967166 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-qk6bz" event={"ID":"6cbb9e3e-f545-4d83-aee4-8e122c54437c","Type":"ContainerStarted","Data":"d334ec1662516f964ef36e185a24879520e5622581d6458fd08bb66fa82de65b"}
Nov 24 13:36:10 crc kubenswrapper[5039]: I1124 13:36:10.969973 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-69pxs" event={"ID":"eee87172-9357-412c-8eb2-7df01649f1d0","Type":"ContainerStarted","Data":"e40a096d6df04d7d531c863721c5e542ef4996cea6c627b8e42639702b6296ff"}
Nov 24 13:36:10 crc kubenswrapper[5039]: I1124 13:36:10.975289 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6" event={"ID":"0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a","Type":"ContainerStarted","Data":"38b796bc72b662e16716b0fd52af5d29b8d26cf9a71fa88d6b6fb0b1975e9c9c"}
Nov 24 13:36:10 crc kubenswrapper[5039]: I1124 13:36:10.976377 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6"
Nov 24 13:36:10 crc kubenswrapper[5039]: I1124 13:36:10.978185 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rvq7d" event={"ID":"edf2350a-f77f-45ec-87c1-35f7b38ddcb3","Type":"ContainerStarted","Data":"d775c5e23cded7d4ae9625ad898794326fae7775dc08cb7340b2b7eb527316dc"}
Nov 24 13:36:10 crc kubenswrapper[5039]: I1124 13:36:10.984544 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-nng76" event={"ID":"979a5bac-57c9-4d42-9af6-11228e980f7f","Type":"ContainerStarted","Data":"80bc5754a9702235ce5d8e013af59c3c24e9ef9f76c60da0cd3401691c9b3347"}
Nov 24 13:36:11 crc kubenswrapper[5039]: I1124 13:36:11.006767 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6" podStartSLOduration=27.00674729 podStartE2EDuration="27.00674729s" podCreationTimestamp="2025-11-24 13:35:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:36:11.00306388 +0000 UTC m=+1083.442188380" watchObservedRunningTime="2025-11-24 13:36:11.00674729 +0000 UTC m=+1083.445871790"
Nov 24 13:36:13 crc kubenswrapper[5039]: I1124 13:36:13.002453 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-kg5jg" event={"ID":"ab34fb1d-70af-4438-86c7-3856f1733097","Type":"ContainerStarted","Data":"7c08099283968af25bd3779d68759259a4b1a25d8471f690a9d2c2facfcc46ee"}
Nov 24 13:36:13 crc kubenswrapper[5039]: I1124 13:36:13.003139 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-kg5jg" event={"ID":"ab34fb1d-70af-4438-86c7-3856f1733097","Type":"ContainerStarted","Data":"f4a9a7c72e1b540f0260e2a8cd48097b263c7a9990ebd453ab0f90257f01f799"}
Nov 24 13:36:13 crc kubenswrapper[5039]: I1124 13:36:13.003346 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-864885998-kg5jg"
Nov 24 13:36:13 crc kubenswrapper[5039]: I1124 13:36:13.004536 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-qzwtb" event={"ID":"90ff7526-7243-45b2-afaa-ee39dff42b46","Type":"ContainerStarted","Data":"003e636363cd61755fbf1894d3392f4e102b4b34953a34567ddb2963f409edcf"}
Nov 24 13:36:13 crc kubenswrapper[5039]: I1124 13:36:13.004744 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-qzwtb" event={"ID":"90ff7526-7243-45b2-afaa-ee39dff42b46","Type":"ContainerStarted","Data":"f5533946d202de258aa7258604198d69808a552caf059050a01b7da90edae1e0"}
Nov 24 13:36:13 crc kubenswrapper[5039]: I1124 13:36:13.004841 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-qzwtb"
Nov 24 13:36:13 crc kubenswrapper[5039]: I1124 13:36:13.005927 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rvq7d" event={"ID":"edf2350a-f77f-45ec-87c1-35f7b38ddcb3","Type":"ContainerStarted","Data":"887ceea598cc57ce076442f6ae8e96951c409ebb0b8ac7b6ebfe86ae5aa85746"}
Nov 24 13:36:13 crc kubenswrapper[5039]: I1124 13:36:13.024974 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-864885998-kg5jg" podStartSLOduration=3.495898992 podStartE2EDuration="29.024956769s" podCreationTimestamp="2025-11-24 13:35:44 +0000 UTC" firstStartedPulling="2025-11-24 13:35:46.338155214 +0000 UTC m=+1058.777279714" lastFinishedPulling="2025-11-24 13:36:11.867212991 +0000 UTC m=+1084.306337491" observedRunningTime="2025-11-24 13:36:13.02334522 +0000 UTC m=+1085.462469720" watchObservedRunningTime="2025-11-24 13:36:13.024956769 +0000 UTC m=+1085.464081279"
Nov 24 13:36:13 crc kubenswrapper[5039]: I1124 13:36:13.042714 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rvq7d" podStartSLOduration=3.9366496 podStartE2EDuration="30.0426974s" podCreationTimestamp="2025-11-24 13:35:43 +0000 UTC" firstStartedPulling="2025-11-24 13:35:46.075613603 +0000 UTC m=+1058.514738103" lastFinishedPulling="2025-11-24 13:36:12.181661403 +0000 UTC m=+1084.620785903" observedRunningTime="2025-11-24 13:36:13.038745873 +0000 UTC m=+1085.477870373" watchObservedRunningTime="2025-11-24 13:36:13.0426974 +0000 UTC m=+1085.481821900"
Nov 24 13:36:13 crc kubenswrapper[5039]: I1124 13:36:13.060423 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-qzwtb" podStartSLOduration=6.577411557 podStartE2EDuration="29.060393229s" podCreationTimestamp="2025-11-24 13:35:44 +0000 UTC" firstStartedPulling="2025-11-24 13:35:46.342766897 +0000 UTC m=+1058.781891397" lastFinishedPulling="2025-11-24 13:36:08.825748569 +0000 UTC m=+1081.264873069" observedRunningTime="2025-11-24 13:36:13.057752075 +0000 UTC m=+1085.496876585" watchObservedRunningTime="2025-11-24 13:36:13.060393229 +0000 UTC m=+1085.499517769"
Nov 24 13:36:14 crc kubenswrapper[5039]: I1124 13:36:14.013686 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rvq7d"
Nov 24 13:36:15 crc kubenswrapper[5039]: I1124 13:36:15.024044 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rvq7d"
Nov 24 13:36:18 crc kubenswrapper[5039]: I1124 13:36:18.860173 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-7b5fb95979-n45b6"
Nov 24 13:36:20 crc kubenswrapper[5039]: I1124 13:36:20.101844 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 13:36:20 crc kubenswrapper[5039]: I1124 13:36:20.102760 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 13:36:24 crc kubenswrapper[5039]: I1124 13:36:24.800887 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-qzwtb"
Nov 24 13:36:25 crc kubenswrapper[5039]: I1124 13:36:25.245217 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-864885998-kg5jg"
Nov 24 13:36:33 crc kubenswrapper[5039]: E1124 13:36:33.381834 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd"
Nov 24 13:36:33 crc kubenswrapper[5039]: E1124 13:36:33.382849 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_I
MAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELAT
ED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-an
telope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMA
GE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nss9c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb_openstack-operators(3731fd87-4c6a-4fb0-a3d5-cf48e76a5448): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 13:36:36 crc kubenswrapper[5039]: E1124 13:36:36.517638 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894" Nov 24 13:36:36 crc kubenswrapper[5039]: E1124 13:36:36.518996 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mpzrs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-d5cc86f4b-j7gmp_openstack-operators(bd656299-f7da-4ca8-aee9-25c389243cc9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 24 13:36:41 crc kubenswrapper[5039]: E1124 13:36:41.663028 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp" podUID="bd656299-f7da-4ca8-aee9-25c389243cc9"
Nov 24 13:36:41 crc kubenswrapper[5039]: E1124 13:36:41.728412 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb" podUID="3731fd87-4c6a-4fb0-a3d5-cf48e76a5448"
Nov 24 13:36:41 crc kubenswrapper[5039]: E1124 13:36:41.728537 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-ft57m" podUID="058dcaa2-f18f-4eff-bfd1-d290a8fd36a1"
Nov 24 13:36:41 crc kubenswrapper[5039]: E1124 13:36:41.729303 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-l8cvk" podUID="0067c9ac-5dfc-4e0d-b316-161e02698ffd"
Nov 24 13:36:41 crc kubenswrapper[5039]: E1124 13:36:41.741358 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-bf6985ffc-g86nb" podUID="7f6bb6a1-8df6-4d15-8d27-a5bbc28b9b31"
Nov 24 13:36:41 crc kubenswrapper[5039]: E1124 13:36:41.774412 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phdb6" podUID="3c29a4a4-1d0c-4a1f-a4b5-a67cb564707a"
Nov 24 13:36:41 crc kubenswrapper[5039]: E1124 13:36:41.796210 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bgn7g" podUID="bea08559-78a5-4287-85e8-a83768d94670"
Nov 24 13:36:41 crc kubenswrapper[5039]: E1124 13:36:41.796459 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nwsck" podUID="98b88919-04d2-4c01-b45a-dd72afbbe179"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.246809 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-qk6bz" event={"ID":"6cbb9e3e-f545-4d83-aee4-8e122c54437c","Type":"ContainerStarted","Data":"ba6cfc357bc24d612d7df5bc612d147ca78aaa2a0032d8d94106935001ec346d"}
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.246987 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cb74df96-qk6bz"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.248386 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-sgqwz" event={"ID":"b913096c-9ece-4755-9545-0116fbc53123","Type":"ContainerStarted","Data":"789b4b414cf0b6f6b5cc1bbd6e999207c6698a66616471ee6299268a91884ced"}
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.249219 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-sgqwz"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.249713 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cb74df96-qk6bz"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.251174 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-whklh" event={"ID":"396e7965-a743-4028-989b-e3610abb5a3a","Type":"ContainerStarted","Data":"02ed1730b2a668cf21b75a9af99114f4dc03c6fa69aa52334dd8cc7a50b56f87"}
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.252892 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2lmlh" event={"ID":"29991995-423a-42c0-ae52-2b3c160a3e0c","Type":"ContainerStarted","Data":"a2304250ce3925c26e0cff33bc538a012cea52637a7df0c6d2fe04b2812fd4cf"}
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.253155 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2lmlh"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.255669 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-nng76" event={"ID":"979a5bac-57c9-4d42-9af6-11228e980f7f","Type":"ContainerStarted","Data":"443445f515e470401a2c402d46372b74ed57d6fdf47f02c050bfd8ebce58e9ce"}
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.255826 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-nng76"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.257069 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2lmlh"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.257584 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phdb6" event={"ID":"3c29a4a4-1d0c-4a1f-a4b5-a67cb564707a","Type":"ContainerStarted","Data":"ff4aa32472e72b46f62ae5ccd4ce7180219dd1055738a22c8546e5c5549777cf"}
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.259000 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb" event={"ID":"3731fd87-4c6a-4fb0-a3d5-cf48e76a5448","Type":"ContainerStarted","Data":"cfe23522d00c37f745d761bd58fb6d125bb8f4ba9e2ad14fbd3f85f75e58aec1"}
Nov 24 13:36:42 crc kubenswrapper[5039]: E1124 13:36:42.259997 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb" podUID="3731fd87-4c6a-4fb0-a3d5-cf48e76a5448"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.260145 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-sgqwz"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.260496 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-nng76"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.263350 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp" event={"ID":"bd656299-f7da-4ca8-aee9-25c389243cc9","Type":"ContainerStarted","Data":"5a92db4fec1d1036faea8611480ef5cb91a2343a5731b68acfb8f474c8f460ee"}
Nov 24 13:36:42 crc kubenswrapper[5039]: E1124 13:36:42.265325 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894\\\"\"" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp" podUID="bd656299-f7da-4ca8-aee9-25c389243cc9"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.267387 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-68gjk" event={"ID":"35f14195-18aa-433d-8705-1aa24a8a1818","Type":"ContainerStarted","Data":"eb2810a7f76454695ff2ad14385e8e577b67345fdd6354e0059b414f905f54eb"}
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.269030 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cb74df96-qk6bz" podStartSLOduration=3.814968735 podStartE2EDuration="58.269006706s" podCreationTimestamp="2025-11-24 13:35:44 +0000 UTC" firstStartedPulling="2025-11-24 13:35:46.347329187 +0000 UTC m=+1058.786453687" lastFinishedPulling="2025-11-24 13:36:40.801367148 +0000 UTC m=+1113.240491658" observedRunningTime="2025-11-24 13:36:42.263642935 +0000 UTC m=+1114.702767435" watchObservedRunningTime="2025-11-24 13:36:42.269006706 +0000 UTC m=+1114.708131206"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.269489 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-68gjk"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.270234 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-68gjk"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.270847 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-j9qqq" event={"ID":"076a99d2-27b3-4d08-bdcc-876e1dec4f5f","Type":"ContainerStarted","Data":"b868a1075a20273cd3db22ad0b62338991f8494bf9ab66d9e264a1ac6afa715d"}
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.274232 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-6nwfx" event={"ID":"99f1711f-1dd9-471d-9a2d-8c6e0a46fb0d","Type":"ContainerStarted","Data":"6410efd667775a40cc019c91982e3dfd3d04416a0bd1d30e2f653e98e1296008"}
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.274603 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-6nwfx"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.278780 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bgn7g" event={"ID":"bea08559-78a5-4287-85e8-a83768d94670","Type":"ContainerStarted","Data":"5563299f8dbee22609d649f3ee556a19e5ccae88e82e6fe7f239c72c42128355"}
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.279024 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-6nwfx"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.284419 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-hwn8q" event={"ID":"865f4099-70b9-45a1-9bcd-c92882c9aab1","Type":"ContainerStarted","Data":"5eb60769dc1d5a5b7806dac64f25f5c16c8afb9851aa6d4c9eb3db48df1e28a7"}
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.284549 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-hwn8q"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.289037 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-bf6985ffc-g86nb" event={"ID":"7f6bb6a1-8df6-4d15-8d27-a5bbc28b9b31","Type":"ContainerStarted","Data":"27a74141f60548b88849c2c2676564a24b768a15acd22e336ec9c80e87de50f4"}
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.293600 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-hwn8q"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.295789 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-69pxs" event={"ID":"eee87172-9357-412c-8eb2-7df01649f1d0","Type":"ContainerStarted","Data":"ce43ff9179730b161605e971b8a87b77044c747843e89814d35ad71af39ee747"}
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.295941 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-69pxs"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.299973 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-69pxs"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.301603 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nwsck" event={"ID":"98b88919-04d2-4c01-b45a-dd72afbbe179","Type":"ContainerStarted","Data":"c04395ec4ea660796af843e0cbe7f3bb37db09efa2b750c7df9c9f7fd599b675"}
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.321605 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-88sqn" event={"ID":"e97ae0ee-d044-4b9d-a371-eec59a5ff932","Type":"ContainerStarted","Data":"a46b4e97db3382da858ef7630214446585846698f2b6dc8fbc0d87583483e580"}
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.321653 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-88sqn"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.323326 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-88sqn"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.341855 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-l8cvk" event={"ID":"0067c9ac-5dfc-4e0d-b316-161e02698ffd","Type":"ContainerStarted","Data":"89ddd13183927f32ea3b262c5fc6e47dca860c4e725ae9ed9df1f05bb4325b71"}
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.364678 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-ft57m" event={"ID":"058dcaa2-f18f-4eff-bfd1-d290a8fd36a1","Type":"ContainerStarted","Data":"6d5280caad8f0e320cf01adb72159091f34f6d6baa94aba8313483aceee89bb0"}
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.411314 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-nng76" podStartSLOduration=4.493661236 podStartE2EDuration="59.411293496s" podCreationTimestamp="2025-11-24 13:35:43 +0000 UTC" firstStartedPulling="2025-11-24 13:35:46.065493118 +0000 UTC m=+1058.504617618" lastFinishedPulling="2025-11-24 13:36:40.983125378 +0000 UTC m=+1113.422249878" observedRunningTime="2025-11-24 13:36:42.406459739 +0000 UTC m=+1114.845584259" watchObservedRunningTime="2025-11-24 13:36:42.411293496 +0000 UTC m=+1114.850417996"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.461687 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-whklh" podStartSLOduration=3.750078077 podStartE2EDuration="59.461672101s" podCreationTimestamp="2025-11-24 13:35:43 +0000 UTC" firstStartedPulling="2025-11-24 13:35:45.157541582 +0000 UTC m=+1057.596666082" lastFinishedPulling="2025-11-24 13:36:40.869135596 +0000 UTC m=+1113.308260106" observedRunningTime="2025-11-24 13:36:42.461042767 +0000 UTC m=+1114.900167267" watchObservedRunningTime="2025-11-24 13:36:42.461672101 +0000 UTC m=+1114.900796601"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.487298 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2lmlh" podStartSLOduration=4.868081504 podStartE2EDuration="59.487281915s" podCreationTimestamp="2025-11-24 13:35:43 +0000 UTC" firstStartedPulling="2025-11-24 13:35:46.044662782 +0000 UTC m=+1058.483787282" lastFinishedPulling="2025-11-24 13:36:40.663863183 +0000 UTC m=+1113.102987693" observedRunningTime="2025-11-24 13:36:42.482916299 +0000 UTC m=+1114.922040799" watchObservedRunningTime="2025-11-24 13:36:42.487281915 +0000 UTC m=+1114.926406415"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.505098 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-sgqwz" podStartSLOduration=3.686123143 podStartE2EDuration="59.505081307s" podCreationTimestamp="2025-11-24 13:35:43 +0000 UTC" firstStartedPulling="2025-11-24 13:35:45.132514645 +0000 UTC m=+1057.571639155" lastFinishedPulling="2025-11-24 13:36:40.951472819 +0000 UTC m=+1113.390597319" observedRunningTime="2025-11-24 13:36:42.503620662 +0000 UTC m=+1114.942745152" watchObservedRunningTime="2025-11-24 13:36:42.505081307 +0000 UTC m=+1114.944205807"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.527226 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-j9qqq" podStartSLOduration=3.887328048 podStartE2EDuration="58.527208526s" podCreationTimestamp="2025-11-24 13:35:44 +0000 UTC" firstStartedPulling="2025-11-24 13:35:46.309356266 +0000 UTC m=+1058.748480766" lastFinishedPulling="2025-11-24 13:36:40.949236744 +0000 UTC m=+1113.388361244" observedRunningTime="2025-11-24 13:36:42.521827055 +0000 UTC m=+1114.960951545" watchObservedRunningTime="2025-11-24 13:36:42.527208526 +0000 UTC m=+1114.966333026"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.542845 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-68gjk" podStartSLOduration=4.744769292 podStartE2EDuration="59.542828916s" podCreationTimestamp="2025-11-24 13:35:43 +0000 UTC" firstStartedPulling="2025-11-24 13:35:45.731206135 +0000 UTC m=+1058.170330635" lastFinishedPulling="2025-11-24 13:36:40.529265759 +0000 UTC m=+1112.968390259" observedRunningTime="2025-11-24 13:36:42.539181127 +0000 UTC m=+1114.978305627" watchObservedRunningTime="2025-11-24 13:36:42.542828916 +0000 UTC m=+1114.981953416"
Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.649293 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration"
pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-6nwfx" podStartSLOduration=3.673382901 podStartE2EDuration="59.649270444s" podCreationTimestamp="2025-11-24 13:35:43 +0000 UTC" firstStartedPulling="2025-11-24 13:35:44.978726562 +0000 UTC m=+1057.417851062" lastFinishedPulling="2025-11-24 13:36:40.954614095 +0000 UTC m=+1113.393738605" observedRunningTime="2025-11-24 13:36:42.633409629 +0000 UTC m=+1115.072534129" watchObservedRunningTime="2025-11-24 13:36:42.649270444 +0000 UTC m=+1115.088394944" Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.682062 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-69pxs" podStartSLOduration=4.526347709 podStartE2EDuration="59.682047202s" podCreationTimestamp="2025-11-24 13:35:43 +0000 UTC" firstStartedPulling="2025-11-24 13:35:45.71085116 +0000 UTC m=+1058.149975680" lastFinishedPulling="2025-11-24 13:36:40.866550663 +0000 UTC m=+1113.305675173" observedRunningTime="2025-11-24 13:36:42.680543926 +0000 UTC m=+1115.119668426" watchObservedRunningTime="2025-11-24 13:36:42.682047202 +0000 UTC m=+1115.121171702" Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.695972 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-88sqn" podStartSLOduration=26.865803244 podStartE2EDuration="59.695955431s" podCreationTimestamp="2025-11-24 13:35:43 +0000 UTC" firstStartedPulling="2025-11-24 13:35:46.330951099 +0000 UTC m=+1058.770075599" lastFinishedPulling="2025-11-24 13:36:19.161103286 +0000 UTC m=+1091.600227786" observedRunningTime="2025-11-24 13:36:42.695770526 +0000 UTC m=+1115.134895026" watchObservedRunningTime="2025-11-24 13:36:42.695955431 +0000 UTC m=+1115.135079931" Nov 24 13:36:42 crc kubenswrapper[5039]: I1124 13:36:42.724616 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-hwn8q" podStartSLOduration=5.145512385 podStartE2EDuration="59.724582516s" podCreationTimestamp="2025-11-24 13:35:43 +0000 UTC" firstStartedPulling="2025-11-24 13:35:46.323223812 +0000 UTC m=+1058.762348302" lastFinishedPulling="2025-11-24 13:36:40.902293893 +0000 UTC m=+1113.341418433" observedRunningTime="2025-11-24 13:36:42.718730414 +0000 UTC m=+1115.157854914" watchObservedRunningTime="2025-11-24 13:36:42.724582516 +0000 UTC m=+1115.163707016" Nov 24 13:36:43 crc kubenswrapper[5039]: I1124 13:36:43.382332 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-whklh" Nov 24 13:36:43 crc kubenswrapper[5039]: I1124 13:36:43.383111 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-whklh" Nov 24 13:36:43 crc kubenswrapper[5039]: E1124 13:36:43.603197 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894\\\"\"" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp" podUID="bd656299-f7da-4ca8-aee9-25c389243cc9" Nov 24 13:36:45 crc kubenswrapper[5039]: I1124 13:36:45.422227 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phdb6" event={"ID":"3c29a4a4-1d0c-4a1f-a4b5-a67cb564707a","Type":"ContainerStarted","Data":"bc74dfb56ba05515dadfef0bedfcdb95582385e3ae0e6bd1d78cda341ab09342"} Nov 24 13:36:45 crc kubenswrapper[5039]: I1124 13:36:45.423050 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phdb6" Nov 24 13:36:45 crc kubenswrapper[5039]: I1124 13:36:45.425486 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bgn7g" event={"ID":"bea08559-78a5-4287-85e8-a83768d94670","Type":"ContainerStarted","Data":"afdaf481c0b4eda5baa96163f4d20559734ed57a378a8f0971f652a34dfb883f"} Nov 24 13:36:45 crc kubenswrapper[5039]: I1124 13:36:45.427270 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-bf6985ffc-g86nb" event={"ID":"7f6bb6a1-8df6-4d15-8d27-a5bbc28b9b31","Type":"ContainerStarted","Data":"d6e41240b0111848c4c0be231c83762ebb1ddd68ec8900655601f36c6d0d2282"} Nov 24 13:36:45 crc kubenswrapper[5039]: I1124 13:36:45.427354 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-bf6985ffc-g86nb" Nov 24 13:36:45 crc kubenswrapper[5039]: I1124 13:36:45.430190 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-l8cvk" event={"ID":"0067c9ac-5dfc-4e0d-b316-161e02698ffd","Type":"ContainerStarted","Data":"86d5f17d033591e9f2f667a019f47c1c5e5846bca814b226876c0af540650545"} Nov 24 13:36:45 crc kubenswrapper[5039]: I1124 13:36:45.430330 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-l8cvk" Nov 24 13:36:45 crc kubenswrapper[5039]: I1124 13:36:45.431685 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-ft57m" event={"ID":"058dcaa2-f18f-4eff-bfd1-d290a8fd36a1","Type":"ContainerStarted","Data":"45aa43965f7a6ca68d5e2edfb589022af82226bb3fbfd13f7833acf6504e4c44"} Nov 24 13:36:45 crc kubenswrapper[5039]: I1124 13:36:45.431801 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-ft57m" Nov 24 13:36:45 crc kubenswrapper[5039]: I1124 13:36:45.433277 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nwsck" event={"ID":"98b88919-04d2-4c01-b45a-dd72afbbe179","Type":"ContainerStarted","Data":"2fd93ccaf37fc6b101fb782b271dcfc1d063ef0cec5244a3810a4624ff8a89a0"} Nov 24 13:36:45 crc kubenswrapper[5039]: I1124 13:36:45.446089 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phdb6" podStartSLOduration=4.096815642 podStartE2EDuration="1m2.446067732s" podCreationTimestamp="2025-11-24 13:35:43 +0000 UTC" firstStartedPulling="2025-11-24 13:35:46.072765004 +0000 UTC m=+1058.511889504" lastFinishedPulling="2025-11-24 13:36:44.422017084 +0000 UTC m=+1116.861141594" observedRunningTime="2025-11-24 13:36:45.439469082 +0000 UTC m=+1117.878593602" watchObservedRunningTime="2025-11-24 13:36:45.446067732 +0000 UTC m=+1117.885192242" Nov 24 13:36:45 crc kubenswrapper[5039]: I1124 13:36:45.458599 5039 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-bf6985ffc-g86nb" podStartSLOduration=3.09748807 podStartE2EDuration="1m1.458580037s" podCreationTimestamp="2025-11-24 13:35:44 +0000 UTC" firstStartedPulling="2025-11-24 13:35:46.059943373 +0000 UTC m=+1058.499067873" lastFinishedPulling="2025-11-24 13:36:44.42103534 +0000 UTC m=+1116.860159840" observedRunningTime="2025-11-24 13:36:45.454462327 +0000 UTC m=+1117.893586837" watchObservedRunningTime="2025-11-24 13:36:45.458580037 +0000 UTC m=+1117.897704547" Nov 24 13:36:45 crc kubenswrapper[5039]: I1124 13:36:45.496258 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-l8cvk" podStartSLOduration=3.782841355 podStartE2EDuration="1m2.496232933s" podCreationTimestamp="2025-11-24 13:35:43 +0000 UTC" firstStartedPulling="2025-11-24 13:35:45.70919571 +0000 UTC m=+1058.148320230" lastFinishedPulling="2025-11-24 13:36:44.422587288 +0000 UTC m=+1116.861711808" observedRunningTime="2025-11-24 13:36:45.492255746 +0000 UTC m=+1117.931380256" watchObservedRunningTime="2025-11-24 13:36:45.496232933 +0000 UTC m=+1117.935357453" Nov 24 13:36:45 crc kubenswrapper[5039]: I1124 13:36:45.498868 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bgn7g" podStartSLOduration=3.814405981 podStartE2EDuration="1m2.498856287s" podCreationTimestamp="2025-11-24 13:35:43 +0000 UTC" firstStartedPulling="2025-11-24 13:35:45.737595289 +0000 UTC m=+1058.176719789" lastFinishedPulling="2025-11-24 13:36:44.422045555 +0000 UTC m=+1116.861170095" observedRunningTime="2025-11-24 13:36:45.472492455 +0000 UTC m=+1117.911616975" watchObservedRunningTime="2025-11-24 13:36:45.498856287 +0000 UTC m=+1117.937980807" Nov 24 13:36:45 crc kubenswrapper[5039]: I1124 13:36:45.520222 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-ft57m" podStartSLOduration=4.19716513 podStartE2EDuration="1m2.520204256s" podCreationTimestamp="2025-11-24 13:35:43 +0000 UTC" firstStartedPulling="2025-11-24 13:35:46.104611886 +0000 UTC m=+1058.543736386" lastFinishedPulling="2025-11-24 13:36:44.427650972 +0000 UTC m=+1116.866775512" observedRunningTime="2025-11-24 13:36:45.513781399 +0000 UTC m=+1117.952905909" watchObservedRunningTime="2025-11-24 13:36:45.520204256 +0000 UTC m=+1117.959328756" Nov 24 13:36:45 crc kubenswrapper[5039]: I1124 13:36:45.536177 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nwsck" podStartSLOduration=4.422733455 podStartE2EDuration="1m2.536157633s" podCreationTimestamp="2025-11-24 13:35:43 +0000 UTC" firstStartedPulling="2025-11-24 13:35:46.310171065 +0000 UTC m=+1058.749295565" lastFinishedPulling="2025-11-24 13:36:44.423595193 +0000 UTC m=+1116.862719743" observedRunningTime="2025-11-24 13:36:45.529722187 +0000 UTC m=+1117.968846707" watchObservedRunningTime="2025-11-24 13:36:45.536157633 +0000 UTC m=+1117.975282133" Nov 24 13:36:46 crc kubenswrapper[5039]: I1124 13:36:46.445246 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb" 
event={"ID":"3731fd87-4c6a-4fb0-a3d5-cf48e76a5448","Type":"ContainerStarted","Data":"2d46786dacf59d5cd79dec2e72de4797565ddb4a4f75c57ba027d3eaeaf4c791"} Nov 24 13:36:46 crc kubenswrapper[5039]: I1124 13:36:46.446384 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bgn7g" Nov 24 13:36:46 crc kubenswrapper[5039]: I1124 13:36:46.446420 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nwsck" Nov 24 13:36:46 crc kubenswrapper[5039]: I1124 13:36:46.479802 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb" podStartSLOduration=24.469395314 podStartE2EDuration="1m3.479776505s" podCreationTimestamp="2025-11-24 13:35:43 +0000 UTC" firstStartedPulling="2025-11-24 13:36:05.959731025 +0000 UTC m=+1078.398855555" lastFinishedPulling="2025-11-24 13:36:44.970112246 +0000 UTC m=+1117.409236746" observedRunningTime="2025-11-24 13:36:46.47134272 +0000 UTC m=+1118.910467230" watchObservedRunningTime="2025-11-24 13:36:46.479776505 +0000 UTC m=+1118.918901045" Nov 24 13:36:48 crc kubenswrapper[5039]: I1124 13:36:48.257786 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb" Nov 24 13:36:50 crc kubenswrapper[5039]: I1124 13:36:50.101635 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:36:50 crc kubenswrapper[5039]: I1124 13:36:50.102064 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:36:50 crc kubenswrapper[5039]: I1124 13:36:50.102144 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:36:50 crc kubenswrapper[5039]: I1124 13:36:50.103686 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"16f5b0fb44ff36ed732d98fa0d4391bb1a697e230891b1a79ab6e7366f72ba49"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 13:36:50 crc kubenswrapper[5039]: I1124 13:36:50.103836 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://16f5b0fb44ff36ed732d98fa0d4391bb1a697e230891b1a79ab6e7366f72ba49" gracePeriod=600 Nov 24 13:36:50 crc kubenswrapper[5039]: I1124 13:36:50.482934 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="16f5b0fb44ff36ed732d98fa0d4391bb1a697e230891b1a79ab6e7366f72ba49" exitCode=0 Nov 24 13:36:50 crc kubenswrapper[5039]: I1124 
13:36:50.483012 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"16f5b0fb44ff36ed732d98fa0d4391bb1a697e230891b1a79ab6e7366f72ba49"} Nov 24 13:36:50 crc kubenswrapper[5039]: I1124 13:36:50.483583 5039 scope.go:117] "RemoveContainer" containerID="066219b28d99610e4d1092b8b5a95d47b8b9f6102be58f3694f6d12e791d5f0b" Nov 24 13:36:51 crc kubenswrapper[5039]: I1124 13:36:51.494829 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"f993c951919012dcf982065d331337a1627947abef22ad885fe48114cf5620d5"} Nov 24 13:36:54 crc kubenswrapper[5039]: I1124 13:36:54.115955 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-l8cvk" Nov 24 13:36:54 crc kubenswrapper[5039]: I1124 13:36:54.127665 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bgn7g" Nov 24 13:36:54 crc kubenswrapper[5039]: I1124 13:36:54.576562 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phdb6" Nov 24 13:36:54 crc kubenswrapper[5039]: I1124 13:36:54.637429 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nwsck" Nov 24 13:36:54 crc kubenswrapper[5039]: I1124 13:36:54.683959 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-ft57m" Nov 24 13:36:54 crc kubenswrapper[5039]: I1124 13:36:54.959790 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-bf6985ffc-g86nb" Nov 24 13:36:57 crc kubenswrapper[5039]: I1124 13:36:57.542843 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp" event={"ID":"bd656299-f7da-4ca8-aee9-25c389243cc9","Type":"ContainerStarted","Data":"89cebed2a59a5ba369105054c1ccb78b4bb590ef68a8a8d91aba41d184f88d57"} Nov 24 13:36:57 crc kubenswrapper[5039]: I1124 13:36:57.543433 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp" Nov 24 13:36:57 crc kubenswrapper[5039]: I1124 13:36:57.573066 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp" podStartSLOduration=25.664591569 podStartE2EDuration="1m14.57303535s" podCreationTimestamp="2025-11-24 13:35:43 +0000 UTC" firstStartedPulling="2025-11-24 13:36:07.908258243 +0000 UTC m=+1080.347382743" lastFinishedPulling="2025-11-24 13:36:56.816702024 +0000 UTC m=+1129.255826524" observedRunningTime="2025-11-24 13:36:57.560946827 +0000 UTC m=+1130.000071327" watchObservedRunningTime="2025-11-24 13:36:57.57303535 +0000 UTC m=+1130.012159890" Nov 24 13:36:58 crc kubenswrapper[5039]: I1124 13:36:58.264408 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb" Nov 24 13:37:08 crc 
kubenswrapper[5039]: I1124 13:37:08.020557 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-j7gmp" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.126626 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-mb79k"] Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.128764 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-mb79k" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.131898 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.132077 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.132191 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.132346 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-8vv8q" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.144278 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-mb79k"] Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.186183 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-whdv8"] Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.187431 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-whdv8" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.190736 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.206212 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-whdv8"] Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.235258 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d419456d-1fc2-4eb9-b95b-e44fb3be8cb3-config\") pod \"dnsmasq-dns-78dd6ddcc-whdv8\" (UID: \"d419456d-1fc2-4eb9-b95b-e44fb3be8cb3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-whdv8" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.235326 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6d5b55c-805a-41a0-b574-d8e55b00ee39-config\") pod \"dnsmasq-dns-675f4bcbfc-mb79k\" (UID: \"b6d5b55c-805a-41a0-b574-d8e55b00ee39\") " pod="openstack/dnsmasq-dns-675f4bcbfc-mb79k" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.235360 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rvhx\" (UniqueName: \"kubernetes.io/projected/b6d5b55c-805a-41a0-b574-d8e55b00ee39-kube-api-access-7rvhx\") pod \"dnsmasq-dns-675f4bcbfc-mb79k\" (UID: \"b6d5b55c-805a-41a0-b574-d8e55b00ee39\") " pod="openstack/dnsmasq-dns-675f4bcbfc-mb79k" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.235410 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d419456d-1fc2-4eb9-b95b-e44fb3be8cb3-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-whdv8\" (UID: 
\"d419456d-1fc2-4eb9-b95b-e44fb3be8cb3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-whdv8" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.235446 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sdtp\" (UniqueName: \"kubernetes.io/projected/d419456d-1fc2-4eb9-b95b-e44fb3be8cb3-kube-api-access-5sdtp\") pod \"dnsmasq-dns-78dd6ddcc-whdv8\" (UID: \"d419456d-1fc2-4eb9-b95b-e44fb3be8cb3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-whdv8" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.336594 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rvhx\" (UniqueName: \"kubernetes.io/projected/b6d5b55c-805a-41a0-b574-d8e55b00ee39-kube-api-access-7rvhx\") pod \"dnsmasq-dns-675f4bcbfc-mb79k\" (UID: \"b6d5b55c-805a-41a0-b574-d8e55b00ee39\") " pod="openstack/dnsmasq-dns-675f4bcbfc-mb79k" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.336682 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d419456d-1fc2-4eb9-b95b-e44fb3be8cb3-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-whdv8\" (UID: \"d419456d-1fc2-4eb9-b95b-e44fb3be8cb3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-whdv8" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.336724 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5sdtp\" (UniqueName: \"kubernetes.io/projected/d419456d-1fc2-4eb9-b95b-e44fb3be8cb3-kube-api-access-5sdtp\") pod \"dnsmasq-dns-78dd6ddcc-whdv8\" (UID: \"d419456d-1fc2-4eb9-b95b-e44fb3be8cb3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-whdv8" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.336772 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d419456d-1fc2-4eb9-b95b-e44fb3be8cb3-config\") pod \"dnsmasq-dns-78dd6ddcc-whdv8\" (UID: \"d419456d-1fc2-4eb9-b95b-e44fb3be8cb3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-whdv8" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.336805 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6d5b55c-805a-41a0-b574-d8e55b00ee39-config\") pod \"dnsmasq-dns-675f4bcbfc-mb79k\" (UID: \"b6d5b55c-805a-41a0-b574-d8e55b00ee39\") " pod="openstack/dnsmasq-dns-675f4bcbfc-mb79k" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.337829 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6d5b55c-805a-41a0-b574-d8e55b00ee39-config\") pod \"dnsmasq-dns-675f4bcbfc-mb79k\" (UID: \"b6d5b55c-805a-41a0-b574-d8e55b00ee39\") " pod="openstack/dnsmasq-dns-675f4bcbfc-mb79k" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.337850 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d419456d-1fc2-4eb9-b95b-e44fb3be8cb3-config\") pod \"dnsmasq-dns-78dd6ddcc-whdv8\" (UID: \"d419456d-1fc2-4eb9-b95b-e44fb3be8cb3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-whdv8" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.338128 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d419456d-1fc2-4eb9-b95b-e44fb3be8cb3-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-whdv8\" (UID: \"d419456d-1fc2-4eb9-b95b-e44fb3be8cb3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-whdv8" Nov 24 13:37:23 crc 
kubenswrapper[5039]: I1124 13:37:23.355545 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rvhx\" (UniqueName: \"kubernetes.io/projected/b6d5b55c-805a-41a0-b574-d8e55b00ee39-kube-api-access-7rvhx\") pod \"dnsmasq-dns-675f4bcbfc-mb79k\" (UID: \"b6d5b55c-805a-41a0-b574-d8e55b00ee39\") " pod="openstack/dnsmasq-dns-675f4bcbfc-mb79k" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.355844 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5sdtp\" (UniqueName: \"kubernetes.io/projected/d419456d-1fc2-4eb9-b95b-e44fb3be8cb3-kube-api-access-5sdtp\") pod \"dnsmasq-dns-78dd6ddcc-whdv8\" (UID: \"d419456d-1fc2-4eb9-b95b-e44fb3be8cb3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-whdv8" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.453776 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-mb79k" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.510795 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-whdv8" Nov 24 13:37:23 crc kubenswrapper[5039]: I1124 13:37:23.917832 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-mb79k"] Nov 24 13:37:24 crc kubenswrapper[5039]: W1124 13:37:24.005303 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd419456d_1fc2_4eb9_b95b_e44fb3be8cb3.slice/crio-cb8d4042598da9dcc498db409af01377bb40639906dc9ad9d6d889db5a6843fe WatchSource:0}: Error finding container cb8d4042598da9dcc498db409af01377bb40639906dc9ad9d6d889db5a6843fe: Status 404 returned error can't find the container with id cb8d4042598da9dcc498db409af01377bb40639906dc9ad9d6d889db5a6843fe Nov 24 13:37:24 crc kubenswrapper[5039]: I1124 13:37:24.005607 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-whdv8"] Nov 24 13:37:24 crc kubenswrapper[5039]: I1124 13:37:24.508011 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-mb79k" event={"ID":"b6d5b55c-805a-41a0-b574-d8e55b00ee39","Type":"ContainerStarted","Data":"7a10426a4c9faa2cc8ee49be73b46c6626aec6b5d237ec5675687b6f3d1c69f0"} Nov 24 13:37:24 crc kubenswrapper[5039]: I1124 13:37:24.510114 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-whdv8" event={"ID":"d419456d-1fc2-4eb9-b95b-e44fb3be8cb3","Type":"ContainerStarted","Data":"cb8d4042598da9dcc498db409af01377bb40639906dc9ad9d6d889db5a6843fe"} Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.082023 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-mb79k"] Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.103902 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-kxs9n"] Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.105966 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.131222 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-kxs9n"] Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.181890 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28w9k\" (UniqueName: \"kubernetes.io/projected/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106-kube-api-access-28w9k\") pod \"dnsmasq-dns-666b6646f7-kxs9n\" (UID: \"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106\") " pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.181964 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106-config\") pod \"dnsmasq-dns-666b6646f7-kxs9n\" (UID: \"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106\") " pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.181990 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106-dns-svc\") pod \"dnsmasq-dns-666b6646f7-kxs9n\" (UID: \"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106\") " pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.284029 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106-dns-svc\") pod \"dnsmasq-dns-666b6646f7-kxs9n\" (UID: \"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106\") " pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.284145 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28w9k\" (UniqueName: \"kubernetes.io/projected/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106-kube-api-access-28w9k\") pod \"dnsmasq-dns-666b6646f7-kxs9n\" (UID: \"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106\") " pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.284191 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106-config\") pod \"dnsmasq-dns-666b6646f7-kxs9n\" (UID: \"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106\") " pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.285050 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106-config\") pod \"dnsmasq-dns-666b6646f7-kxs9n\" (UID: \"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106\") " pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.285102 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106-dns-svc\") pod \"dnsmasq-dns-666b6646f7-kxs9n\" (UID: \"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106\") " pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.310446 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28w9k\" (UniqueName: 
\"kubernetes.io/projected/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106-kube-api-access-28w9k\") pod \"dnsmasq-dns-666b6646f7-kxs9n\" (UID: \"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106\") " pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.441916 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.486496 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-whdv8"] Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.534811 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-k7d8d"] Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.536068 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.568348 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-k7d8d"] Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.595462 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d135c416-c117-4ff6-812c-3d02e07ebbd4-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-k7d8d\" (UID: \"d135c416-c117-4ff6-812c-3d02e07ebbd4\") " pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.595585 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t94d4\" (UniqueName: \"kubernetes.io/projected/d135c416-c117-4ff6-812c-3d02e07ebbd4-kube-api-access-t94d4\") pod \"dnsmasq-dns-57d769cc4f-k7d8d\" (UID: \"d135c416-c117-4ff6-812c-3d02e07ebbd4\") " pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.595621 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d135c416-c117-4ff6-812c-3d02e07ebbd4-config\") pod \"dnsmasq-dns-57d769cc4f-k7d8d\" (UID: \"d135c416-c117-4ff6-812c-3d02e07ebbd4\") " pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.696776 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d135c416-c117-4ff6-812c-3d02e07ebbd4-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-k7d8d\" (UID: \"d135c416-c117-4ff6-812c-3d02e07ebbd4\") " pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.696890 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t94d4\" (UniqueName: \"kubernetes.io/projected/d135c416-c117-4ff6-812c-3d02e07ebbd4-kube-api-access-t94d4\") pod \"dnsmasq-dns-57d769cc4f-k7d8d\" (UID: \"d135c416-c117-4ff6-812c-3d02e07ebbd4\") " pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.696924 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d135c416-c117-4ff6-812c-3d02e07ebbd4-config\") pod \"dnsmasq-dns-57d769cc4f-k7d8d\" (UID: \"d135c416-c117-4ff6-812c-3d02e07ebbd4\") " pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.697641 5039 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d135c416-c117-4ff6-812c-3d02e07ebbd4-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-k7d8d\" (UID: \"d135c416-c117-4ff6-812c-3d02e07ebbd4\") " pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.697678 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d135c416-c117-4ff6-812c-3d02e07ebbd4-config\") pod \"dnsmasq-dns-57d769cc4f-k7d8d\" (UID: \"d135c416-c117-4ff6-812c-3d02e07ebbd4\") " pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.715494 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t94d4\" (UniqueName: \"kubernetes.io/projected/d135c416-c117-4ff6-812c-3d02e07ebbd4-kube-api-access-t94d4\") pod \"dnsmasq-dns-57d769cc4f-k7d8d\" (UID: \"d135c416-c117-4ff6-812c-3d02e07ebbd4\") " pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d" Nov 24 13:37:26 crc kubenswrapper[5039]: I1124 13:37:26.939385 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.048269 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-kxs9n"] Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.235353 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.237173 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.238857 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.243928 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.244010 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.244030 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.244351 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-5pt4p" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.244482 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.244635 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.251298 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.307117 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.307228 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" 
(UniqueName: \"kubernetes.io/configmap/6808fd4e-3718-430c-87e8-ca3e801a8248-server-conf\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.307302 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjp8k\" (UniqueName: \"kubernetes.io/projected/6808fd4e-3718-430c-87e8-ca3e801a8248-kube-api-access-sjp8k\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.307333 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6808fd4e-3718-430c-87e8-ca3e801a8248-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.307478 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6808fd4e-3718-430c-87e8-ca3e801a8248-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.307535 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.307617 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.307650 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6808fd4e-3718-430c-87e8-ca3e801a8248-config-data\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.307758 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.307852 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.307939 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/6808fd4e-3718-430c-87e8-ca3e801a8248-pod-info\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.376175 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-k7d8d"] Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.409344 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6808fd4e-3718-430c-87e8-ca3e801a8248-pod-info\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.409421 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.409447 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6808fd4e-3718-430c-87e8-ca3e801a8248-server-conf\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.409490 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjp8k\" (UniqueName: \"kubernetes.io/projected/6808fd4e-3718-430c-87e8-ca3e801a8248-kube-api-access-sjp8k\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.409582 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6808fd4e-3718-430c-87e8-ca3e801a8248-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.409642 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6808fd4e-3718-430c-87e8-ca3e801a8248-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.409663 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.409705 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.409731 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6808fd4e-3718-430c-87e8-ca3e801a8248-config-data\") pod \"rabbitmq-server-0\" (UID: 
\"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.409764 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.409821 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.410529 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.410629 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.411709 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.411898 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6808fd4e-3718-430c-87e8-ca3e801a8248-config-data\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.412036 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6808fd4e-3718-430c-87e8-ca3e801a8248-server-conf\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.416445 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6808fd4e-3718-430c-87e8-ca3e801a8248-pod-info\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.416527 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.421204 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/6808fd4e-3718-430c-87e8-ca3e801a8248-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.425013 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6808fd4e-3718-430c-87e8-ca3e801a8248-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.427251 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.427397 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjp8k\" (UniqueName: \"kubernetes.io/projected/6808fd4e-3718-430c-87e8-ca3e801a8248-kube-api-access-sjp8k\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.435970 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.537147 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d" event={"ID":"d135c416-c117-4ff6-812c-3d02e07ebbd4","Type":"ContainerStarted","Data":"ddbd66daef8dac28b26dd26a3b51820a5ab6732c2781e28ca3e24edef41873ea"} Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.538272 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" event={"ID":"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106","Type":"ContainerStarted","Data":"35c9eb0aca9c89b1dfa30629f3ea8db8cfec1cd073a60464f6fc04c769424b78"} Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.563128 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.637118 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.638468 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.643280 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.643537 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.643669 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.643767 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.643862 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-qxwdg" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.643982 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.644120 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.648125 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.713333 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.713395 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dwtk\" (UniqueName: \"kubernetes.io/projected/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-kube-api-access-6dwtk\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.713425 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.713440 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.713457 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.713492 5039 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.713537 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.713559 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.713604 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.713634 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.713653 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.815399 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.815443 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.815474 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.815533 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dwtk\" (UniqueName: 
\"kubernetes.io/projected/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-kube-api-access-6dwtk\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.815563 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.815579 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.815599 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.815640 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.815665 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.815680 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.815711 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.815886 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.818074 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-erlang-cookie\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.818599 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.818718 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.819143 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.819909 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.824850 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.825071 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.825432 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.825662 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.960391 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dwtk\" (UniqueName: \"kubernetes.io/projected/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-kube-api-access-6dwtk\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.961696 5039 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:27 crc kubenswrapper[5039]: I1124 13:37:27.978943 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:37:28 crc kubenswrapper[5039]: I1124 13:37:28.177563 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 13:37:28 crc kubenswrapper[5039]: I1124 13:37:28.493667 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 24 13:37:28 crc kubenswrapper[5039]: I1124 13:37:28.550040 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"8e2e73c0-db1d-45e0-b056-0ed13bdbb904","Type":"ContainerStarted","Data":"959e40d2d54fd0020f51f21d62b14981a6e1c4766b554fe9d2d7ccb9697db7d2"} Nov 24 13:37:28 crc kubenswrapper[5039]: I1124 13:37:28.552398 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"6808fd4e-3718-430c-87e8-ca3e801a8248","Type":"ContainerStarted","Data":"f39a2a8966d7dbe143d746bd2913e732bf9eae7c489004c818e692d0ff9707c0"} Nov 24 13:37:28 crc kubenswrapper[5039]: I1124 13:37:28.957466 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 24 13:37:28 crc kubenswrapper[5039]: I1124 13:37:28.961763 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 24 13:37:28 crc kubenswrapper[5039]: I1124 13:37:28.965601 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 24 13:37:28 crc kubenswrapper[5039]: I1124 13:37:28.970727 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 24 13:37:28 crc kubenswrapper[5039]: I1124 13:37:28.982242 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 24 13:37:28 crc kubenswrapper[5039]: I1124 13:37:28.983043 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 24 13:37:28 crc kubenswrapper[5039]: I1124 13:37:28.983235 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-4cmqd" Nov 24 13:37:28 crc kubenswrapper[5039]: I1124 13:37:28.985963 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.144644 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c3dc205b-caf2-45c8-8110-d0f8be91e10f-config-data-generated\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.144695 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2nn6\" (UniqueName: \"kubernetes.io/projected/c3dc205b-caf2-45c8-8110-d0f8be91e10f-kube-api-access-w2nn6\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.144761 5039 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c3dc205b-caf2-45c8-8110-d0f8be91e10f-config-data-default\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.144817 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3dc205b-caf2-45c8-8110-d0f8be91e10f-operator-scripts\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.144870 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3dc205b-caf2-45c8-8110-d0f8be91e10f-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.144974 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3dc205b-caf2-45c8-8110-d0f8be91e10f-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.145042 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.145330 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c3dc205b-caf2-45c8-8110-d0f8be91e10f-kolla-config\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.247228 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3dc205b-caf2-45c8-8110-d0f8be91e10f-operator-scripts\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.247336 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3dc205b-caf2-45c8-8110-d0f8be91e10f-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.247376 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3dc205b-caf2-45c8-8110-d0f8be91e10f-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.247673 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.247832 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c3dc205b-caf2-45c8-8110-d0f8be91e10f-kolla-config\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.247998 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c3dc205b-caf2-45c8-8110-d0f8be91e10f-config-data-generated\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.248039 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2nn6\" (UniqueName: \"kubernetes.io/projected/c3dc205b-caf2-45c8-8110-d0f8be91e10f-kube-api-access-w2nn6\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.248181 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c3dc205b-caf2-45c8-8110-d0f8be91e10f-config-data-default\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.248432 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c3dc205b-caf2-45c8-8110-d0f8be91e10f-config-data-generated\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.249043 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c3dc205b-caf2-45c8-8110-d0f8be91e10f-kolla-config\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.249822 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3dc205b-caf2-45c8-8110-d0f8be91e10f-operator-scripts\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.250016 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.250796 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c3dc205b-caf2-45c8-8110-d0f8be91e10f-config-data-default\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0" Nov 24 13:37:29 crc 
kubenswrapper[5039]: I1124 13:37:29.263422 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3dc205b-caf2-45c8-8110-d0f8be91e10f-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0"
Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.288178 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w2nn6\" (UniqueName: \"kubernetes.io/projected/c3dc205b-caf2-45c8-8110-d0f8be91e10f-kube-api-access-w2nn6\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0"
Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.303401 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3dc205b-caf2-45c8-8110-d0f8be91e10f-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0"
Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.305403 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"c3dc205b-caf2-45c8-8110-d0f8be91e10f\") " pod="openstack/openstack-galera-0"
Nov 24 13:37:29 crc kubenswrapper[5039]: I1124 13:37:29.598234 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.506600 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.508388 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.512539 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.515995 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-gc82x"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.516249 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.516560 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.521654 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.588460 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.588538 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.588570 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.588780 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.588823 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.588841 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.588917 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.588951 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qcx2c\" (UniqueName: \"kubernetes.io/projected/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-kube-api-access-qcx2c\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.693135 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.693213 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.693232 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.695350 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.696376 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.697039 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.697622 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0"
Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.697644 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName:
\"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0" Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.697759 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0" Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.697801 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qcx2c\" (UniqueName: \"kubernetes.io/projected/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-kube-api-access-qcx2c\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0" Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.698230 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/openstack-cell1-galera-0" Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.700036 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0" Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.703168 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0" Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.705489 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0" Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.705541 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0" Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.722168 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcx2c\" (UniqueName: \"kubernetes.io/projected/1dcf47d4-1399-46bb-bda8-5dfeb96a3b60-kube-api-access-qcx2c\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " pod="openstack/openstack-cell1-galera-0" Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.738420 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60\") " 
pod="openstack/openstack-cell1-galera-0" Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.765624 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.767079 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.772353 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-7cjn7" Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.772548 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.772669 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.782050 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.850887 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.907345 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cef0b8e-d050-4055-a798-31b108727299-combined-ca-bundle\") pod \"memcached-0\" (UID: \"3cef0b8e-d050-4055-a798-31b108727299\") " pod="openstack/memcached-0" Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.907393 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkjsv\" (UniqueName: \"kubernetes.io/projected/3cef0b8e-d050-4055-a798-31b108727299-kube-api-access-gkjsv\") pod \"memcached-0\" (UID: \"3cef0b8e-d050-4055-a798-31b108727299\") " pod="openstack/memcached-0" Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.907468 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3cef0b8e-d050-4055-a798-31b108727299-kolla-config\") pod \"memcached-0\" (UID: \"3cef0b8e-d050-4055-a798-31b108727299\") " pod="openstack/memcached-0" Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.907490 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/3cef0b8e-d050-4055-a798-31b108727299-memcached-tls-certs\") pod \"memcached-0\" (UID: \"3cef0b8e-d050-4055-a798-31b108727299\") " pod="openstack/memcached-0" Nov 24 13:37:30 crc kubenswrapper[5039]: I1124 13:37:30.907537 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3cef0b8e-d050-4055-a798-31b108727299-config-data\") pod \"memcached-0\" (UID: \"3cef0b8e-d050-4055-a798-31b108727299\") " pod="openstack/memcached-0" Nov 24 13:37:31 crc kubenswrapper[5039]: I1124 13:37:31.008613 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3cef0b8e-d050-4055-a798-31b108727299-kolla-config\") pod \"memcached-0\" (UID: \"3cef0b8e-d050-4055-a798-31b108727299\") " pod="openstack/memcached-0" Nov 24 13:37:31 crc kubenswrapper[5039]: I1124 13:37:31.008678 5039 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/3cef0b8e-d050-4055-a798-31b108727299-memcached-tls-certs\") pod \"memcached-0\" (UID: \"3cef0b8e-d050-4055-a798-31b108727299\") " pod="openstack/memcached-0" Nov 24 13:37:31 crc kubenswrapper[5039]: I1124 13:37:31.008726 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3cef0b8e-d050-4055-a798-31b108727299-config-data\") pod \"memcached-0\" (UID: \"3cef0b8e-d050-4055-a798-31b108727299\") " pod="openstack/memcached-0" Nov 24 13:37:31 crc kubenswrapper[5039]: I1124 13:37:31.008798 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cef0b8e-d050-4055-a798-31b108727299-combined-ca-bundle\") pod \"memcached-0\" (UID: \"3cef0b8e-d050-4055-a798-31b108727299\") " pod="openstack/memcached-0" Nov 24 13:37:31 crc kubenswrapper[5039]: I1124 13:37:31.008832 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkjsv\" (UniqueName: \"kubernetes.io/projected/3cef0b8e-d050-4055-a798-31b108727299-kube-api-access-gkjsv\") pod \"memcached-0\" (UID: \"3cef0b8e-d050-4055-a798-31b108727299\") " pod="openstack/memcached-0" Nov 24 13:37:31 crc kubenswrapper[5039]: I1124 13:37:31.009467 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3cef0b8e-d050-4055-a798-31b108727299-kolla-config\") pod \"memcached-0\" (UID: \"3cef0b8e-d050-4055-a798-31b108727299\") " pod="openstack/memcached-0" Nov 24 13:37:31 crc kubenswrapper[5039]: I1124 13:37:31.009974 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3cef0b8e-d050-4055-a798-31b108727299-config-data\") pod \"memcached-0\" (UID: \"3cef0b8e-d050-4055-a798-31b108727299\") " pod="openstack/memcached-0" Nov 24 13:37:31 crc kubenswrapper[5039]: I1124 13:37:31.014584 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/3cef0b8e-d050-4055-a798-31b108727299-memcached-tls-certs\") pod \"memcached-0\" (UID: \"3cef0b8e-d050-4055-a798-31b108727299\") " pod="openstack/memcached-0" Nov 24 13:37:31 crc kubenswrapper[5039]: I1124 13:37:31.025117 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cef0b8e-d050-4055-a798-31b108727299-combined-ca-bundle\") pod \"memcached-0\" (UID: \"3cef0b8e-d050-4055-a798-31b108727299\") " pod="openstack/memcached-0" Nov 24 13:37:31 crc kubenswrapper[5039]: I1124 13:37:31.027088 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkjsv\" (UniqueName: \"kubernetes.io/projected/3cef0b8e-d050-4055-a798-31b108727299-kube-api-access-gkjsv\") pod \"memcached-0\" (UID: \"3cef0b8e-d050-4055-a798-31b108727299\") " pod="openstack/memcached-0" Nov 24 13:37:31 crc kubenswrapper[5039]: I1124 13:37:31.123361 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 24 13:37:33 crc kubenswrapper[5039]: I1124 13:37:33.429003 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 13:37:33 crc kubenswrapper[5039]: I1124 13:37:33.430096 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 24 13:37:33 crc kubenswrapper[5039]: I1124 13:37:33.443018 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-8zb9h" Nov 24 13:37:33 crc kubenswrapper[5039]: I1124 13:37:33.444087 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 13:37:33 crc kubenswrapper[5039]: I1124 13:37:33.552086 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mk6n2\" (UniqueName: \"kubernetes.io/projected/47799b2c-4219-475d-9a09-580720622ee4-kube-api-access-mk6n2\") pod \"kube-state-metrics-0\" (UID: \"47799b2c-4219-475d-9a09-580720622ee4\") " pod="openstack/kube-state-metrics-0" Nov 24 13:37:33 crc kubenswrapper[5039]: I1124 13:37:33.657144 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mk6n2\" (UniqueName: \"kubernetes.io/projected/47799b2c-4219-475d-9a09-580720622ee4-kube-api-access-mk6n2\") pod \"kube-state-metrics-0\" (UID: \"47799b2c-4219-475d-9a09-580720622ee4\") " pod="openstack/kube-state-metrics-0" Nov 24 13:37:33 crc kubenswrapper[5039]: I1124 13:37:33.716807 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mk6n2\" (UniqueName: \"kubernetes.io/projected/47799b2c-4219-475d-9a09-580720622ee4-kube-api-access-mk6n2\") pod \"kube-state-metrics-0\" (UID: \"47799b2c-4219-475d-9a09-580720622ee4\") " pod="openstack/kube-state-metrics-0" Nov 24 13:37:33 crc kubenswrapper[5039]: I1124 13:37:33.753777 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.139577 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-s2gqw"] Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.140744 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-s2gqw" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.147007 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards-sa-dockercfg-qccxc" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.146990 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.150124 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-s2gqw"] Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.266354 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7d53a76d-409b-45a1-8000-d4f8f2b1ac18-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-s2gqw\" (UID: \"7d53a76d-409b-45a1-8000-d4f8f2b1ac18\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-s2gqw" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.266658 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpmlm\" (UniqueName: \"kubernetes.io/projected/7d53a76d-409b-45a1-8000-d4f8f2b1ac18-kube-api-access-lpmlm\") pod \"observability-ui-dashboards-7d5fb4cbfb-s2gqw\" (UID: \"7d53a76d-409b-45a1-8000-d4f8f2b1ac18\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-s2gqw" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.368817 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7d53a76d-409b-45a1-8000-d4f8f2b1ac18-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-s2gqw\" (UID: \"7d53a76d-409b-45a1-8000-d4f8f2b1ac18\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-s2gqw" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.368889 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpmlm\" (UniqueName: \"kubernetes.io/projected/7d53a76d-409b-45a1-8000-d4f8f2b1ac18-kube-api-access-lpmlm\") pod \"observability-ui-dashboards-7d5fb4cbfb-s2gqw\" (UID: \"7d53a76d-409b-45a1-8000-d4f8f2b1ac18\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-s2gqw" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.392585 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpmlm\" (UniqueName: \"kubernetes.io/projected/7d53a76d-409b-45a1-8000-d4f8f2b1ac18-kube-api-access-lpmlm\") pod \"observability-ui-dashboards-7d5fb4cbfb-s2gqw\" (UID: \"7d53a76d-409b-45a1-8000-d4f8f2b1ac18\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-s2gqw" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.393058 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7d53a76d-409b-45a1-8000-d4f8f2b1ac18-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-s2gqw\" (UID: \"7d53a76d-409b-45a1-8000-d4f8f2b1ac18\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-s2gqw" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.464359 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-68ccb84d7-kmgl5"] Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.470754 5039 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openshift-console/console-68ccb84d7-kmgl5" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.477464 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-s2gqw" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.489025 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-68ccb84d7-kmgl5"] Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.572548 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e2942748-049c-489e-92c7-94cd26f86651-console-oauth-config\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.572600 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e2942748-049c-489e-92c7-94cd26f86651-console-serving-cert\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.572642 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxsh9\" (UniqueName: \"kubernetes.io/projected/e2942748-049c-489e-92c7-94cd26f86651-kube-api-access-nxsh9\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.572680 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e2942748-049c-489e-92c7-94cd26f86651-oauth-serving-cert\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.572761 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e2942748-049c-489e-92c7-94cd26f86651-trusted-ca-bundle\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.572907 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e2942748-049c-489e-92c7-94cd26f86651-service-ca\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.572936 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e2942748-049c-489e-92c7-94cd26f86651-console-config\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.667977 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 24 13:37:34 crc kubenswrapper[5039]: 
I1124 13:37:34.669835 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.672669 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.672739 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.672891 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-w4dz7" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.673933 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e2942748-049c-489e-92c7-94cd26f86651-oauth-serving-cert\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.673992 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e2942748-049c-489e-92c7-94cd26f86651-trusted-ca-bundle\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.674054 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e2942748-049c-489e-92c7-94cd26f86651-service-ca\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.674078 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e2942748-049c-489e-92c7-94cd26f86651-console-config\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.674116 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e2942748-049c-489e-92c7-94cd26f86651-console-oauth-config\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.674137 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e2942748-049c-489e-92c7-94cd26f86651-console-serving-cert\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.674156 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxsh9\" (UniqueName: \"kubernetes.io/projected/e2942748-049c-489e-92c7-94cd26f86651-kube-api-access-nxsh9\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.675112 5039 reflector.go:368] Caches populated 
Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.675715 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e2942748-049c-489e-92c7-94cd26f86651-oauth-serving-cert\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5"
Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.676071 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e2942748-049c-489e-92c7-94cd26f86651-console-config\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5"
Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.676697 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e2942748-049c-489e-92c7-94cd26f86651-service-ca\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5"
Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.676758 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e2942748-049c-489e-92c7-94cd26f86651-trusted-ca-bundle\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5"
Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.676946 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0"
Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.681591 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e2942748-049c-489e-92c7-94cd26f86651-console-serving-cert\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5"
Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.684952 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e2942748-049c-489e-92c7-94cd26f86651-console-oauth-config\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5"
Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.691642 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.698213 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxsh9\" (UniqueName: \"kubernetes.io/projected/e2942748-049c-489e-92c7-94cd26f86651-kube-api-access-nxsh9\") pod \"console-68ccb84d7-kmgl5\" (UID: \"e2942748-049c-489e-92c7-94cd26f86651\") " pod="openshift-console/console-68ccb84d7-kmgl5"
Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.725950 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.777273 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2h7ww\" (UniqueName: \"kubernetes.io/projected/869a1d3b-808b-4a44-b300-c2fb36a07e8a-kube-api-access-2h7ww\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0"
\"kubernetes.io/projected/869a1d3b-808b-4a44-b300-c2fb36a07e8a-kube-api-access-2h7ww\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.777335 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/869a1d3b-808b-4a44-b300-c2fb36a07e8a-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.777404 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/869a1d3b-808b-4a44-b300-c2fb36a07e8a-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.777540 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/869a1d3b-808b-4a44-b300-c2fb36a07e8a-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.777570 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/869a1d3b-808b-4a44-b300-c2fb36a07e8a-config\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.777605 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/869a1d3b-808b-4a44-b300-c2fb36a07e8a-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.777640 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.777805 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/869a1d3b-808b-4a44-b300-c2fb36a07e8a-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.804640 5039 util.go:30] "No sandbox for pod can be found. 
Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.879073 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/869a1d3b-808b-4a44-b300-c2fb36a07e8a-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0"
Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.879120 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/869a1d3b-808b-4a44-b300-c2fb36a07e8a-config\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0"
Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.879168 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/869a1d3b-808b-4a44-b300-c2fb36a07e8a-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0"
Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.879190 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0"
Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.879253 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/869a1d3b-808b-4a44-b300-c2fb36a07e8a-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0"
Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.879313 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2h7ww\" (UniqueName: \"kubernetes.io/projected/869a1d3b-808b-4a44-b300-c2fb36a07e8a-kube-api-access-2h7ww\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0"
Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.879337 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/869a1d3b-808b-4a44-b300-c2fb36a07e8a-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0"
Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.879357 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/869a1d3b-808b-4a44-b300-c2fb36a07e8a-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0"
Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.882384 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/869a1d3b-808b-4a44-b300-c2fb36a07e8a-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0"
\"kubernetes.io/configmap/869a1d3b-808b-4a44-b300-c2fb36a07e8a-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.884735 5039 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.884761 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f0fdd26626b161b6dd0cbc6930a2dd292876373bbfce0d53987e82de5f5e1a8b/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.892072 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/869a1d3b-808b-4a44-b300-c2fb36a07e8a-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.894274 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/869a1d3b-808b-4a44-b300-c2fb36a07e8a-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.894547 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/869a1d3b-808b-4a44-b300-c2fb36a07e8a-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.894889 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/869a1d3b-808b-4a44-b300-c2fb36a07e8a-config\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.895097 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/869a1d3b-808b-4a44-b300-c2fb36a07e8a-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.898002 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2h7ww\" (UniqueName: \"kubernetes.io/projected/869a1d3b-808b-4a44-b300-c2fb36a07e8a-kube-api-access-2h7ww\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:37:34 crc kubenswrapper[5039]: I1124 13:37:34.928971 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\") pod \"prometheus-metric-storage-0\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:37:35 crc kubenswrapper[5039]: I1124 13:37:35.042125 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.402490 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.403927 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.413596 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.415411 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.415419 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.417035 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-2q9th" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.425791 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.427339 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.507088 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.507183 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vxmd\" (UniqueName: \"kubernetes.io/projected/59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9-kube-api-access-6vxmd\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.507340 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.507394 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9-config\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.507593 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9-ovsdbserver-nb-tls-certs\") pod 
\"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.507658 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.507687 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.507705 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.609201 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vxmd\" (UniqueName: \"kubernetes.io/projected/59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9-kube-api-access-6vxmd\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.609299 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.609333 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9-config\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.609377 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.609400 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.609420 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0" Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.609438 5039 
Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.609471 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0"
Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.609830 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/ovsdbserver-nb-0"
Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.609921 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0"
Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.610821 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9-config\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0"
Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.611732 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0"
Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.615450 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0"
Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.616046 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0"
Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.620389 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0"
Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.640051 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0"
Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.672332 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vxmd\" (UniqueName: \"kubernetes.io/projected/59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9-kube-api-access-6vxmd\") pod \"ovsdbserver-nb-0\" (UID: \"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9\") " pod="openstack/ovsdbserver-nb-0"
Nov 24 13:37:36 crc kubenswrapper[5039]: I1124 13:37:36.727393 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.199298 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-5dqj9"]
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.200413 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5dqj9"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.205665 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.205880 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.205499 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-mnm9b"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.210251 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-2cfx8"]
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.212957 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-2cfx8"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.218189 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5dqj9"]
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.226584 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-2cfx8"]
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.349776 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-scripts\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.349829 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/cfe8f618-f843-4051-9491-cb3d06e1a1bc-var-lib\") pod \"ovn-controller-ovs-2cfx8\" (UID: \"cfe8f618-f843-4051-9491-cb3d06e1a1bc\") " pod="openstack/ovn-controller-ovs-2cfx8"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.349858 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cfe8f618-f843-4051-9491-cb3d06e1a1bc-scripts\") pod \"ovn-controller-ovs-2cfx8\" (UID: \"cfe8f618-f843-4051-9491-cb3d06e1a1bc\") " pod="openstack/ovn-controller-ovs-2cfx8"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.349921 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-var-run\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.349965 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-var-run-ovn\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.349988 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cfe8f618-f843-4051-9491-cb3d06e1a1bc-var-run\") pod \"ovn-controller-ovs-2cfx8\" (UID: \"cfe8f618-f843-4051-9491-cb3d06e1a1bc\") " pod="openstack/ovn-controller-ovs-2cfx8"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.350025 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-combined-ca-bundle\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.350052 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/cfe8f618-f843-4051-9491-cb3d06e1a1bc-var-log\") pod \"ovn-controller-ovs-2cfx8\" (UID: \"cfe8f618-f843-4051-9491-cb3d06e1a1bc\") " pod="openstack/ovn-controller-ovs-2cfx8"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.350108 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lw4vz\" (UniqueName: \"kubernetes.io/projected/cfe8f618-f843-4051-9491-cb3d06e1a1bc-kube-api-access-lw4vz\") pod \"ovn-controller-ovs-2cfx8\" (UID: \"cfe8f618-f843-4051-9491-cb3d06e1a1bc\") " pod="openstack/ovn-controller-ovs-2cfx8"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.350144 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-ovn-controller-tls-certs\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.350179 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frzpd\" (UniqueName: \"kubernetes.io/projected/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-kube-api-access-frzpd\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.350212 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-var-log-ovn\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.350235 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/cfe8f618-f843-4051-9491-cb3d06e1a1bc-etc-ovs\") pod \"ovn-controller-ovs-2cfx8\" (UID: \"cfe8f618-f843-4051-9491-cb3d06e1a1bc\") " pod="openstack/ovn-controller-ovs-2cfx8"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.451993 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lw4vz\" (UniqueName: \"kubernetes.io/projected/cfe8f618-f843-4051-9491-cb3d06e1a1bc-kube-api-access-lw4vz\") pod \"ovn-controller-ovs-2cfx8\" (UID: \"cfe8f618-f843-4051-9491-cb3d06e1a1bc\") " pod="openstack/ovn-controller-ovs-2cfx8"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.452052 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-ovn-controller-tls-certs\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.452084 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frzpd\" (UniqueName: \"kubernetes.io/projected/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-kube-api-access-frzpd\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.452112 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-var-log-ovn\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.452132 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/cfe8f618-f843-4051-9491-cb3d06e1a1bc-etc-ovs\") pod \"ovn-controller-ovs-2cfx8\" (UID: \"cfe8f618-f843-4051-9491-cb3d06e1a1bc\") " pod="openstack/ovn-controller-ovs-2cfx8"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.452159 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-scripts\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.452175 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/cfe8f618-f843-4051-9491-cb3d06e1a1bc-var-lib\") pod \"ovn-controller-ovs-2cfx8\" (UID: \"cfe8f618-f843-4051-9491-cb3d06e1a1bc\") " pod="openstack/ovn-controller-ovs-2cfx8"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.452193 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cfe8f618-f843-4051-9491-cb3d06e1a1bc-scripts\") pod \"ovn-controller-ovs-2cfx8\" (UID: \"cfe8f618-f843-4051-9491-cb3d06e1a1bc\") " pod="openstack/ovn-controller-ovs-2cfx8"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.452276 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-var-run\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9"
Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.452316 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-var-run-ovn\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9"
\"kubernetes.io/host-path/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-var-run-ovn\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9" Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.452334 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cfe8f618-f843-4051-9491-cb3d06e1a1bc-var-run\") pod \"ovn-controller-ovs-2cfx8\" (UID: \"cfe8f618-f843-4051-9491-cb3d06e1a1bc\") " pod="openstack/ovn-controller-ovs-2cfx8" Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.452389 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-combined-ca-bundle\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9" Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.452409 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/cfe8f618-f843-4051-9491-cb3d06e1a1bc-var-log\") pod \"ovn-controller-ovs-2cfx8\" (UID: \"cfe8f618-f843-4051-9491-cb3d06e1a1bc\") " pod="openstack/ovn-controller-ovs-2cfx8" Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.452865 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/cfe8f618-f843-4051-9491-cb3d06e1a1bc-var-log\") pod \"ovn-controller-ovs-2cfx8\" (UID: \"cfe8f618-f843-4051-9491-cb3d06e1a1bc\") " pod="openstack/ovn-controller-ovs-2cfx8" Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.453085 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/cfe8f618-f843-4051-9491-cb3d06e1a1bc-etc-ovs\") pod \"ovn-controller-ovs-2cfx8\" (UID: \"cfe8f618-f843-4051-9491-cb3d06e1a1bc\") " pod="openstack/ovn-controller-ovs-2cfx8" Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.453084 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/cfe8f618-f843-4051-9491-cb3d06e1a1bc-var-lib\") pod \"ovn-controller-ovs-2cfx8\" (UID: \"cfe8f618-f843-4051-9491-cb3d06e1a1bc\") " pod="openstack/ovn-controller-ovs-2cfx8" Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.453207 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-var-run-ovn\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9" Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.453286 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cfe8f618-f843-4051-9491-cb3d06e1a1bc-var-run\") pod \"ovn-controller-ovs-2cfx8\" (UID: \"cfe8f618-f843-4051-9491-cb3d06e1a1bc\") " pod="openstack/ovn-controller-ovs-2cfx8" Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.453312 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-var-run\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9" Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.453310 5039 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-var-log-ovn\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9" Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.454699 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cfe8f618-f843-4051-9491-cb3d06e1a1bc-scripts\") pod \"ovn-controller-ovs-2cfx8\" (UID: \"cfe8f618-f843-4051-9491-cb3d06e1a1bc\") " pod="openstack/ovn-controller-ovs-2cfx8" Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.454954 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-scripts\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9" Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.466367 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-combined-ca-bundle\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9" Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.469541 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-ovn-controller-tls-certs\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9" Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.469623 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frzpd\" (UniqueName: \"kubernetes.io/projected/4fc86906-5a7c-4bfe-8d23-1c98a8711a4a-kube-api-access-frzpd\") pod \"ovn-controller-5dqj9\" (UID: \"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a\") " pod="openstack/ovn-controller-5dqj9" Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.470005 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lw4vz\" (UniqueName: \"kubernetes.io/projected/cfe8f618-f843-4051-9491-cb3d06e1a1bc-kube-api-access-lw4vz\") pod \"ovn-controller-ovs-2cfx8\" (UID: \"cfe8f618-f843-4051-9491-cb3d06e1a1bc\") " pod="openstack/ovn-controller-ovs-2cfx8" Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.524196 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5dqj9" Nov 24 13:37:38 crc kubenswrapper[5039]: I1124 13:37:38.534793 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-2cfx8" Nov 24 13:37:40 crc kubenswrapper[5039]: I1124 13:37:40.872593 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 24 13:37:40 crc kubenswrapper[5039]: I1124 13:37:40.875126 5039 util.go:30] "No sandbox for pod can be found. 
Nov 24 13:37:40 crc kubenswrapper[5039]: I1124 13:37:40.877490 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-pmk6s"
Nov 24 13:37:40 crc kubenswrapper[5039]: I1124 13:37:40.877826 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts"
Nov 24 13:37:40 crc kubenswrapper[5039]: I1124 13:37:40.878001 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config"
Nov 24 13:37:40 crc kubenswrapper[5039]: I1124 13:37:40.878219 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs"
Nov 24 13:37:40 crc kubenswrapper[5039]: I1124 13:37:40.889344 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Nov 24 13:37:40 crc kubenswrapper[5039]: I1124 13:37:40.994212 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5a69761-eccd-49e6-8749-86142600d287-config\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0"
Nov 24 13:37:40 crc kubenswrapper[5039]: I1124 13:37:40.994266 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5a69761-eccd-49e6-8749-86142600d287-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0"
Nov 24 13:37:40 crc kubenswrapper[5039]: I1124 13:37:40.994305 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5a69761-eccd-49e6-8749-86142600d287-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0"
Nov 24 13:37:40 crc kubenswrapper[5039]: I1124 13:37:40.994344 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5a69761-eccd-49e6-8749-86142600d287-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0"
Nov 24 13:37:40 crc kubenswrapper[5039]: I1124 13:37:40.994386 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5a69761-eccd-49e6-8749-86142600d287-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0"
Nov 24 13:37:40 crc kubenswrapper[5039]: I1124 13:37:40.994421 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0"
Nov 24 13:37:40 crc kubenswrapper[5039]: I1124 13:37:40.994444 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a5a69761-eccd-49e6-8749-86142600d287-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0"
pod="openstack/ovsdbserver-sb-0" Nov 24 13:37:40 crc kubenswrapper[5039]: I1124 13:37:40.994466 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bd87\" (UniqueName: \"kubernetes.io/projected/a5a69761-eccd-49e6-8749-86142600d287-kube-api-access-7bd87\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0" Nov 24 13:37:41 crc kubenswrapper[5039]: I1124 13:37:41.096146 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5a69761-eccd-49e6-8749-86142600d287-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0" Nov 24 13:37:41 crc kubenswrapper[5039]: I1124 13:37:41.096225 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5a69761-eccd-49e6-8749-86142600d287-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0" Nov 24 13:37:41 crc kubenswrapper[5039]: I1124 13:37:41.096280 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0" Nov 24 13:37:41 crc kubenswrapper[5039]: I1124 13:37:41.096320 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a5a69761-eccd-49e6-8749-86142600d287-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0" Nov 24 13:37:41 crc kubenswrapper[5039]: I1124 13:37:41.096358 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bd87\" (UniqueName: \"kubernetes.io/projected/a5a69761-eccd-49e6-8749-86142600d287-kube-api-access-7bd87\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0" Nov 24 13:37:41 crc kubenswrapper[5039]: I1124 13:37:41.096420 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5a69761-eccd-49e6-8749-86142600d287-config\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0" Nov 24 13:37:41 crc kubenswrapper[5039]: I1124 13:37:41.096460 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5a69761-eccd-49e6-8749-86142600d287-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0" Nov 24 13:37:41 crc kubenswrapper[5039]: I1124 13:37:41.096492 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5a69761-eccd-49e6-8749-86142600d287-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0" Nov 24 13:37:41 crc kubenswrapper[5039]: I1124 13:37:41.096916 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/a5a69761-eccd-49e6-8749-86142600d287-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0" Nov 24 13:37:41 crc kubenswrapper[5039]: I1124 13:37:41.097178 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/ovsdbserver-sb-0" Nov 24 13:37:41 crc kubenswrapper[5039]: I1124 13:37:41.097642 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5a69761-eccd-49e6-8749-86142600d287-config\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0" Nov 24 13:37:41 crc kubenswrapper[5039]: I1124 13:37:41.097924 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5a69761-eccd-49e6-8749-86142600d287-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0" Nov 24 13:37:41 crc kubenswrapper[5039]: I1124 13:37:41.119526 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5a69761-eccd-49e6-8749-86142600d287-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0" Nov 24 13:37:41 crc kubenswrapper[5039]: I1124 13:37:41.119541 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5a69761-eccd-49e6-8749-86142600d287-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0" Nov 24 13:37:41 crc kubenswrapper[5039]: I1124 13:37:41.120023 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5a69761-eccd-49e6-8749-86142600d287-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0" Nov 24 13:37:41 crc kubenswrapper[5039]: I1124 13:37:41.122634 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bd87\" (UniqueName: \"kubernetes.io/projected/a5a69761-eccd-49e6-8749-86142600d287-kube-api-access-7bd87\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0" Nov 24 13:37:41 crc kubenswrapper[5039]: I1124 13:37:41.129499 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"a5a69761-eccd-49e6-8749-86142600d287\") " pod="openstack/ovsdbserver-sb-0" Nov 24 13:37:41 crc kubenswrapper[5039]: I1124 13:37:41.220081 5039 util.go:30] "No sandbox for pod can be found. 
Nov 24 13:37:59 crc kubenswrapper[5039]: E1124 13:37:59.399849 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified"
Nov 24 13:37:59 crc kubenswrapper[5039]: E1124 13:37:59.400675 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7rvhx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-mb79k_openstack(b6d5b55c-805a-41a0-b574-d8e55b00ee39): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 24 13:37:59 crc kubenswrapper[5039]: E1124 13:37:59.401878 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-mb79k" podUID="b6d5b55c-805a-41a0-b574-d8e55b00ee39"
Nov 24 13:37:59 crc kubenswrapper[5039]: E1124 13:37:59.432602 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified"
Nov 24 13:37:59 crc kubenswrapper[5039]: E1124 13:37:59.432857 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5sdtp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-whdv8_openstack(d419456d-1fc2-4eb9-b95b-e44fb3be8cb3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 24 13:37:59 crc kubenswrapper[5039]: E1124 13:37:59.434066 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-whdv8" podUID="d419456d-1fc2-4eb9-b95b-e44fb3be8cb3"
Nov 24 13:37:59 crc kubenswrapper[5039]: E1124 13:37:59.547404 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified"
Nov 24 13:37:59 crc kubenswrapper[5039]: E1124 13:37:59.547915 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-28w9k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-kxs9n_openstack(4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-28w9k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-kxs9n_openstack(4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 13:37:59 crc kubenswrapper[5039]: E1124 13:37:59.549044 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" podUID="4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106" Nov 24 13:37:59 crc kubenswrapper[5039]: E1124 13:37:59.853364 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" podUID="4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106" Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.645894 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-mb79k" Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.651159 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-whdv8" Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.747912 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6d5b55c-805a-41a0-b574-d8e55b00ee39-config\") pod \"b6d5b55c-805a-41a0-b574-d8e55b00ee39\" (UID: \"b6d5b55c-805a-41a0-b574-d8e55b00ee39\") " Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.748052 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5sdtp\" (UniqueName: \"kubernetes.io/projected/d419456d-1fc2-4eb9-b95b-e44fb3be8cb3-kube-api-access-5sdtp\") pod \"d419456d-1fc2-4eb9-b95b-e44fb3be8cb3\" (UID: \"d419456d-1fc2-4eb9-b95b-e44fb3be8cb3\") " Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.748209 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d419456d-1fc2-4eb9-b95b-e44fb3be8cb3-dns-svc\") pod \"d419456d-1fc2-4eb9-b95b-e44fb3be8cb3\" (UID: \"d419456d-1fc2-4eb9-b95b-e44fb3be8cb3\") " Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.748259 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7rvhx\" (UniqueName: \"kubernetes.io/projected/b6d5b55c-805a-41a0-b574-d8e55b00ee39-kube-api-access-7rvhx\") pod \"b6d5b55c-805a-41a0-b574-d8e55b00ee39\" (UID: \"b6d5b55c-805a-41a0-b574-d8e55b00ee39\") " Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.748333 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d419456d-1fc2-4eb9-b95b-e44fb3be8cb3-config\") pod \"d419456d-1fc2-4eb9-b95b-e44fb3be8cb3\" (UID: \"d419456d-1fc2-4eb9-b95b-e44fb3be8cb3\") " Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.748899 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d419456d-1fc2-4eb9-b95b-e44fb3be8cb3-config" (OuterVolumeSpecName: "config") pod "d419456d-1fc2-4eb9-b95b-e44fb3be8cb3" (UID: "d419456d-1fc2-4eb9-b95b-e44fb3be8cb3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.748932 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d419456d-1fc2-4eb9-b95b-e44fb3be8cb3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d419456d-1fc2-4eb9-b95b-e44fb3be8cb3" (UID: "d419456d-1fc2-4eb9-b95b-e44fb3be8cb3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.748969 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6d5b55c-805a-41a0-b574-d8e55b00ee39-config" (OuterVolumeSpecName: "config") pod "b6d5b55c-805a-41a0-b574-d8e55b00ee39" (UID: "b6d5b55c-805a-41a0-b574-d8e55b00ee39"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.754474 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6d5b55c-805a-41a0-b574-d8e55b00ee39-kube-api-access-7rvhx" (OuterVolumeSpecName: "kube-api-access-7rvhx") pod "b6d5b55c-805a-41a0-b574-d8e55b00ee39" (UID: "b6d5b55c-805a-41a0-b574-d8e55b00ee39"). InnerVolumeSpecName "kube-api-access-7rvhx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.754619 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d419456d-1fc2-4eb9-b95b-e44fb3be8cb3-kube-api-access-5sdtp" (OuterVolumeSpecName: "kube-api-access-5sdtp") pod "d419456d-1fc2-4eb9-b95b-e44fb3be8cb3" (UID: "d419456d-1fc2-4eb9-b95b-e44fb3be8cb3"). InnerVolumeSpecName "kube-api-access-5sdtp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:38:00 crc kubenswrapper[5039]: E1124 13:38:00.831158 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 24 13:38:00 crc kubenswrapper[5039]: E1124 13:38:00.831376 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-t94d4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-k7d8d_openstack(d135c416-c117-4ff6-812c-3d02e07ebbd4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 13:38:00 crc kubenswrapper[5039]: E1124 13:38:00.836855 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d" podUID="d135c416-c117-4ff6-812c-3d02e07ebbd4" 
Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.861636 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d419456d-1fc2-4eb9-b95b-e44fb3be8cb3-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.861697 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6d5b55c-805a-41a0-b574-d8e55b00ee39-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.861708 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5sdtp\" (UniqueName: \"kubernetes.io/projected/d419456d-1fc2-4eb9-b95b-e44fb3be8cb3-kube-api-access-5sdtp\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.861719 5039 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d419456d-1fc2-4eb9-b95b-e44fb3be8cb3-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.861728 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7rvhx\" (UniqueName: \"kubernetes.io/projected/b6d5b55c-805a-41a0-b574-d8e55b00ee39-kube-api-access-7rvhx\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.901311 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-mb79k" event={"ID":"b6d5b55c-805a-41a0-b574-d8e55b00ee39","Type":"ContainerDied","Data":"7a10426a4c9faa2cc8ee49be73b46c6626aec6b5d237ec5675687b6f3d1c69f0"} Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.901521 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-mb79k" Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.908324 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-whdv8" Nov 24 13:38:00 crc kubenswrapper[5039]: I1124 13:38:00.908384 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-whdv8" event={"ID":"d419456d-1fc2-4eb9-b95b-e44fb3be8cb3","Type":"ContainerDied","Data":"cb8d4042598da9dcc498db409af01377bb40639906dc9ad9d6d889db5a6843fe"} Nov 24 13:38:00 crc kubenswrapper[5039]: E1124 13:38:00.910069 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d" podUID="d135c416-c117-4ff6-812c-3d02e07ebbd4" Nov 24 13:38:00 crc kubenswrapper[5039]: E1124 13:38:00.914560 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 24 13:38:00 crc kubenswrapper[5039]: E1124 13:38:00.914693 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sjp8k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(6808fd4e-3718-430c-87e8-ca3e801a8248): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 13:38:00 crc kubenswrapper[5039]: E1124 13:38:00.916524 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="6808fd4e-3718-430c-87e8-ca3e801a8248" Nov 24 13:38:01 crc kubenswrapper[5039]: I1124 13:38:01.028709 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-mb79k"] Nov 24 13:38:01 crc kubenswrapper[5039]: I1124 13:38:01.040175 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-mb79k"] Nov 24 13:38:01 crc kubenswrapper[5039]: E1124 13:38:01.057227 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 24 13:38:01 crc kubenswrapper[5039]: E1124 13:38:01.057441 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && 
chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6dwtk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(8e2e73c0-db1d-45e0-b056-0ed13bdbb904): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 13:38:01 crc kubenswrapper[5039]: E1124 13:38:01.058681 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="8e2e73c0-db1d-45e0-b056-0ed13bdbb904" Nov 24 13:38:01 crc kubenswrapper[5039]: I1124 13:38:01.075346 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-whdv8"] Nov 24 13:38:01 crc kubenswrapper[5039]: I1124 13:38:01.084918 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-whdv8"] Nov 24 13:38:01 crc kubenswrapper[5039]: I1124 13:38:01.663876 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5dqj9"] Nov 24 13:38:01 crc kubenswrapper[5039]: I1124 13:38:01.690932 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-s2gqw"] Nov 24 13:38:01 crc kubenswrapper[5039]: W1124 13:38:01.693746 5039 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3cef0b8e_d050_4055_a798_31b108727299.slice/crio-d970a293b21822152e6dcbcf6d9d9384fc5d848a51126d534f32fe3d884ac983 WatchSource:0}: Error finding container d970a293b21822152e6dcbcf6d9d9384fc5d848a51126d534f32fe3d884ac983: Status 404 returned error can't find the container with id d970a293b21822152e6dcbcf6d9d9384fc5d848a51126d534f32fe3d884ac983 Nov 24 13:38:01 crc kubenswrapper[5039]: I1124 13:38:01.705908 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 24 13:38:01 crc kubenswrapper[5039]: I1124 13:38:01.716268 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 24 13:38:01 crc kubenswrapper[5039]: I1124 13:38:01.720795 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 24 13:38:01 crc kubenswrapper[5039]: I1124 13:38:01.789085 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 13:38:01 crc kubenswrapper[5039]: W1124 13:38:01.799069 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod47799b2c_4219_475d_9a09_580720622ee4.slice/crio-13a858bf92c65e2dae183efe0fcd3025ac70c6c5fbd59ad24849564e6647e3cc WatchSource:0}: Error finding container 13a858bf92c65e2dae183efe0fcd3025ac70c6c5fbd59ad24849564e6647e3cc: Status 404 returned error can't find the container with id 13a858bf92c65e2dae183efe0fcd3025ac70c6c5fbd59ad24849564e6647e3cc Nov 24 13:38:01 crc kubenswrapper[5039]: I1124 13:38:01.800353 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-68ccb84d7-kmgl5"] Nov 24 13:38:01 crc kubenswrapper[5039]: W1124 13:38:01.802144 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode2942748_049c_489e_92c7_94cd26f86651.slice/crio-3976028a3e0b6642322c73343243f9a3aed5fad845ad860fb2f676723a705a49 WatchSource:0}: Error finding container 3976028a3e0b6642322c73343243f9a3aed5fad845ad860fb2f676723a705a49: Status 404 returned error can't find the container with id 3976028a3e0b6642322c73343243f9a3aed5fad845ad860fb2f676723a705a49 Nov 24 13:38:01 crc kubenswrapper[5039]: I1124 13:38:01.810752 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 24 13:38:01 crc kubenswrapper[5039]: W1124 13:38:01.812840 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod869a1d3b_808b_4a44_b300_c2fb36a07e8a.slice/crio-72bb41cc9891f48cd49424f09bb45de761eafe79011de41ed70441a2461d3a03 WatchSource:0}: Error finding container 72bb41cc9891f48cd49424f09bb45de761eafe79011de41ed70441a2461d3a03: Status 404 returned error can't find the container with id 72bb41cc9891f48cd49424f09bb45de761eafe79011de41ed70441a2461d3a03 Nov 24 13:38:01 crc kubenswrapper[5039]: I1124 13:38:01.915821 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60","Type":"ContainerStarted","Data":"e25e485d9473238a6fcfecdd2bba953ec086cd7f03a971f07600ff94405ad5a8"} Nov 24 13:38:01 crc kubenswrapper[5039]: I1124 13:38:01.917156 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" 
event={"ID":"3cef0b8e-d050-4055-a798-31b108727299","Type":"ContainerStarted","Data":"d970a293b21822152e6dcbcf6d9d9384fc5d848a51126d534f32fe3d884ac983"} Nov 24 13:38:01 crc kubenswrapper[5039]: I1124 13:38:01.918472 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"869a1d3b-808b-4a44-b300-c2fb36a07e8a","Type":"ContainerStarted","Data":"72bb41cc9891f48cd49424f09bb45de761eafe79011de41ed70441a2461d3a03"} Nov 24 13:38:01 crc kubenswrapper[5039]: I1124 13:38:01.920106 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5dqj9" event={"ID":"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a","Type":"ContainerStarted","Data":"afebb4d3f993ba197f16ac3dc7c6db2b6fb26412e9c12781c6528915668f9d93"} Nov 24 13:38:01 crc kubenswrapper[5039]: I1124 13:38:01.921135 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-s2gqw" event={"ID":"7d53a76d-409b-45a1-8000-d4f8f2b1ac18","Type":"ContainerStarted","Data":"15803efbf640d679d33b106c13c3ffc8906482a118297441c556375e358de4c5"} Nov 24 13:38:01 crc kubenswrapper[5039]: I1124 13:38:01.922313 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c3dc205b-caf2-45c8-8110-d0f8be91e10f","Type":"ContainerStarted","Data":"2d561dcd841c893b48397ddd1b60bb34dc130b02080bf00d5699c81c7a11e35a"} Nov 24 13:38:01 crc kubenswrapper[5039]: I1124 13:38:01.923306 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"47799b2c-4219-475d-9a09-580720622ee4","Type":"ContainerStarted","Data":"13a858bf92c65e2dae183efe0fcd3025ac70c6c5fbd59ad24849564e6647e3cc"} Nov 24 13:38:01 crc kubenswrapper[5039]: I1124 13:38:01.924841 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-68ccb84d7-kmgl5" event={"ID":"e2942748-049c-489e-92c7-94cd26f86651","Type":"ContainerStarted","Data":"3976028a3e0b6642322c73343243f9a3aed5fad845ad860fb2f676723a705a49"} Nov 24 13:38:01 crc kubenswrapper[5039]: E1124 13:38:01.926901 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="6808fd4e-3718-430c-87e8-ca3e801a8248" Nov 24 13:38:01 crc kubenswrapper[5039]: E1124 13:38:01.927634 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="8e2e73c0-db1d-45e0-b056-0ed13bdbb904" Nov 24 13:38:02 crc kubenswrapper[5039]: I1124 13:38:02.334682 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6d5b55c-805a-41a0-b574-d8e55b00ee39" path="/var/lib/kubelet/pods/b6d5b55c-805a-41a0-b574-d8e55b00ee39/volumes" Nov 24 13:38:02 crc kubenswrapper[5039]: I1124 13:38:02.338806 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d419456d-1fc2-4eb9-b95b-e44fb3be8cb3" path="/var/lib/kubelet/pods/d419456d-1fc2-4eb9-b95b-e44fb3be8cb3/volumes" Nov 24 13:38:02 crc kubenswrapper[5039]: I1124 13:38:02.340978 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 24 13:38:02 crc kubenswrapper[5039]: W1124 13:38:02.351809 5039 
manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod59b3b4cd_dfa0_4d92_a57b_8be015d0bdd9.slice/crio-c474a2149790bfe8196ec875bbe7bda2d42cc8201671ede0a171ef68271300c9 WatchSource:0}: Error finding container c474a2149790bfe8196ec875bbe7bda2d42cc8201671ede0a171ef68271300c9: Status 404 returned error can't find the container with id c474a2149790bfe8196ec875bbe7bda2d42cc8201671ede0a171ef68271300c9 Nov 24 13:38:02 crc kubenswrapper[5039]: I1124 13:38:02.910926 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 24 13:38:02 crc kubenswrapper[5039]: I1124 13:38:02.941492 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9","Type":"ContainerStarted","Data":"c474a2149790bfe8196ec875bbe7bda2d42cc8201671ede0a171ef68271300c9"} Nov 24 13:38:02 crc kubenswrapper[5039]: I1124 13:38:02.943705 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-68ccb84d7-kmgl5" event={"ID":"e2942748-049c-489e-92c7-94cd26f86651","Type":"ContainerStarted","Data":"a5e0e4d7222c90ff58e3dfc021ef2c40ff0489461a33889dc9700b1bb3c040e6"} Nov 24 13:38:02 crc kubenswrapper[5039]: I1124 13:38:02.973811 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-68ccb84d7-kmgl5" podStartSLOduration=28.973787335 podStartE2EDuration="28.973787335s" podCreationTimestamp="2025-11-24 13:37:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:38:02.961221049 +0000 UTC m=+1195.400345549" watchObservedRunningTime="2025-11-24 13:38:02.973787335 +0000 UTC m=+1195.412911835" Nov 24 13:38:03 crc kubenswrapper[5039]: I1124 13:38:03.951608 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-2cfx8"] Nov 24 13:38:03 crc kubenswrapper[5039]: I1124 13:38:03.957462 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"a5a69761-eccd-49e6-8749-86142600d287","Type":"ContainerStarted","Data":"481ce0fb8b0febb71763249a10d5e964a52c235ba57709bab696a35c2060dc5a"} Nov 24 13:38:04 crc kubenswrapper[5039]: I1124 13:38:04.805954 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-68ccb84d7-kmgl5" Nov 24 13:38:04 crc kubenswrapper[5039]: I1124 13:38:04.805999 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-68ccb84d7-kmgl5" Nov 24 13:38:04 crc kubenswrapper[5039]: I1124 13:38:04.811099 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-68ccb84d7-kmgl5" Nov 24 13:38:04 crc kubenswrapper[5039]: I1124 13:38:04.968698 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-68ccb84d7-kmgl5" Nov 24 13:38:05 crc kubenswrapper[5039]: I1124 13:38:05.032729 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-5748778ffb-d9m2t"] Nov 24 13:38:05 crc kubenswrapper[5039]: I1124 13:38:05.975752 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2cfx8" event={"ID":"cfe8f618-f843-4051-9491-cb3d06e1a1bc","Type":"ContainerStarted","Data":"2a2c16357126b0441831c0e26ffe0199c5d1d823f3eabd19f75e7b5d3c98200f"} Nov 24 13:38:13 crc kubenswrapper[5039]: E1124 
13:38:13.931238 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified" Nov 24 13:38:13 crc kubenswrapper[5039]: E1124 13:38:13.932279 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovsdbserver-nb,Image:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,Command:[/usr/bin/dumb-init],Args:[/usr/local/bin/container-scripts/setup.sh],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n595hdfh599h678h5bbh646h6h5c6h5dh54h554h648h7dh79h668h668h59bhfdh669hb8h68ch5b7h5fbh64h699h6ch5f6h6hc5h6dh64dh68cq,ValueFrom:nil,},EnvVar{Name:OVN_LOGDIR,Value:/tmp,ValueFrom:nil,},EnvVar{Name:OVN_RUNDIR,Value:/tmp,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovndbcluster-nb-etc-ovn,ReadOnly:false,MountPath:/etc/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6vxmd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof 
ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/cleanup.sh],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:20,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovsdbserver-nb-0_openstack(59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 13:38:30 crc kubenswrapper[5039]: I1124 13:38:30.086403 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-5748778ffb-d9m2t" podUID="a1d34075-ef09-4cf1-8c85-3875cac010ea" containerName="console" containerID="cri-o://e67c0d158c8548889d2622364b83554c5fd42e6d215ae5dbc2639c933c8c5add" gracePeriod=15 Nov 24 13:38:31 crc kubenswrapper[5039]: I1124 13:38:31.231018 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5748778ffb-d9m2t_a1d34075-ef09-4cf1-8c85-3875cac010ea/console/0.log" Nov 24 13:38:31 crc kubenswrapper[5039]: I1124 13:38:31.231399 5039 generic.go:334] "Generic (PLEG): container finished" podID="a1d34075-ef09-4cf1-8c85-3875cac010ea" containerID="e67c0d158c8548889d2622364b83554c5fd42e6d215ae5dbc2639c933c8c5add" exitCode=2 Nov 24 13:38:31 crc kubenswrapper[5039]: I1124 13:38:31.231435 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5748778ffb-d9m2t" event={"ID":"a1d34075-ef09-4cf1-8c85-3875cac010ea","Type":"ContainerDied","Data":"e67c0d158c8548889d2622364b83554c5fd42e6d215ae5dbc2639c933c8c5add"} Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.157273 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5748778ffb-d9m2t_a1d34075-ef09-4cf1-8c85-3875cac010ea/console/0.log" Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.157725 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5748778ffb-d9m2t" Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.254307 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5748778ffb-d9m2t_a1d34075-ef09-4cf1-8c85-3875cac010ea/console/0.log" Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.254359 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5748778ffb-d9m2t" event={"ID":"a1d34075-ef09-4cf1-8c85-3875cac010ea","Type":"ContainerDied","Data":"94c64109a5ba71a2fd1f8fe83c34067087ba6d08eb9cd8d20e439ba10c77f5a5"} Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.254399 5039 scope.go:117] "RemoveContainer" containerID="e67c0d158c8548889d2622364b83554c5fd42e6d215ae5dbc2639c933c8c5add" Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.254537 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5748778ffb-d9m2t" Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.268780 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-trusted-ca-bundle\") pod \"a1d34075-ef09-4cf1-8c85-3875cac010ea\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.269188 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnvc6\" (UniqueName: \"kubernetes.io/projected/a1d34075-ef09-4cf1-8c85-3875cac010ea-kube-api-access-gnvc6\") pod \"a1d34075-ef09-4cf1-8c85-3875cac010ea\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.269266 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-console-config\") pod \"a1d34075-ef09-4cf1-8c85-3875cac010ea\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.269361 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a1d34075-ef09-4cf1-8c85-3875cac010ea-console-serving-cert\") pod \"a1d34075-ef09-4cf1-8c85-3875cac010ea\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.269383 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-service-ca\") pod \"a1d34075-ef09-4cf1-8c85-3875cac010ea\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.269434 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-oauth-serving-cert\") pod \"a1d34075-ef09-4cf1-8c85-3875cac010ea\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.269453 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a1d34075-ef09-4cf1-8c85-3875cac010ea-console-oauth-config\") pod \"a1d34075-ef09-4cf1-8c85-3875cac010ea\" (UID: \"a1d34075-ef09-4cf1-8c85-3875cac010ea\") " Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 
13:38:32.269979 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-console-config" (OuterVolumeSpecName: "console-config") pod "a1d34075-ef09-4cf1-8c85-3875cac010ea" (UID: "a1d34075-ef09-4cf1-8c85-3875cac010ea"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.270004 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-service-ca" (OuterVolumeSpecName: "service-ca") pod "a1d34075-ef09-4cf1-8c85-3875cac010ea" (UID: "a1d34075-ef09-4cf1-8c85-3875cac010ea"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.270016 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "a1d34075-ef09-4cf1-8c85-3875cac010ea" (UID: "a1d34075-ef09-4cf1-8c85-3875cac010ea"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.270329 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "a1d34075-ef09-4cf1-8c85-3875cac010ea" (UID: "a1d34075-ef09-4cf1-8c85-3875cac010ea"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.272930 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1d34075-ef09-4cf1-8c85-3875cac010ea-kube-api-access-gnvc6" (OuterVolumeSpecName: "kube-api-access-gnvc6") pod "a1d34075-ef09-4cf1-8c85-3875cac010ea" (UID: "a1d34075-ef09-4cf1-8c85-3875cac010ea"). InnerVolumeSpecName "kube-api-access-gnvc6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.272997 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1d34075-ef09-4cf1-8c85-3875cac010ea-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "a1d34075-ef09-4cf1-8c85-3875cac010ea" (UID: "a1d34075-ef09-4cf1-8c85-3875cac010ea"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.274332 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1d34075-ef09-4cf1-8c85-3875cac010ea-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "a1d34075-ef09-4cf1-8c85-3875cac010ea" (UID: "a1d34075-ef09-4cf1-8c85-3875cac010ea"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.371732 5039 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a1d34075-ef09-4cf1-8c85-3875cac010ea-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.371769 5039 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.371781 5039 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.371792 5039 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a1d34075-ef09-4cf1-8c85-3875cac010ea-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.371802 5039 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.371813 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnvc6\" (UniqueName: \"kubernetes.io/projected/a1d34075-ef09-4cf1-8c85-3875cac010ea-kube-api-access-gnvc6\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.371825 5039 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a1d34075-ef09-4cf1-8c85-3875cac010ea-console-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.581570 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-5748778ffb-d9m2t"] Nov 24 13:38:32 crc kubenswrapper[5039]: I1124 13:38:32.588776 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-5748778ffb-d9m2t"] Nov 24 13:38:32 crc kubenswrapper[5039]: E1124 13:38:32.949877 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 24 13:38:32 crc kubenswrapper[5039]: E1124 13:38:32.949945 5039 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 24 13:38:32 crc kubenswrapper[5039]: E1124 13:38:32.950100 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods 
--namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mk6n2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(47799b2c-4219-475d-9a09-580720622ee4): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 24 13:38:32 crc kubenswrapper[5039]: E1124 13:38:32.951342 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/kube-state-metrics-0" podUID="47799b2c-4219-475d-9a09-580720622ee4" Nov 24 13:38:33 crc kubenswrapper[5039]: E1124 13:38:33.271166 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="47799b2c-4219-475d-9a09-580720622ee4" Nov 24 13:38:34 crc kubenswrapper[5039]: I1124 13:38:34.319787 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1d34075-ef09-4cf1-8c85-3875cac010ea" path="/var/lib/kubelet/pods/a1d34075-ef09-4cf1-8c85-3875cac010ea/volumes" Nov 24 13:38:34 crc kubenswrapper[5039]: E1124 13:38:34.713303 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-nb\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovsdbserver-nb-0" podUID="59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9" Nov 24 13:38:35 crc kubenswrapper[5039]: I1124 13:38:35.288864 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovsdbserver-nb-0" event={"ID":"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9","Type":"ContainerStarted","Data":"2e17bb06c651d6941216444675b783daca5aae930cf9d1ef53c747e7e27cc1e5"} Nov 24 13:38:35 crc kubenswrapper[5039]: I1124 13:38:35.290653 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 13:38:35 crc kubenswrapper[5039]: I1124 13:38:35.291541 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-s2gqw" event={"ID":"7d53a76d-409b-45a1-8000-d4f8f2b1ac18","Type":"ContainerStarted","Data":"2690c8838693fa7503253d9dac712d0404acf472f77fca73b2e8af9b094c539b"} Nov 24 13:38:35 crc kubenswrapper[5039]: I1124 13:38:35.293735 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c3dc205b-caf2-45c8-8110-d0f8be91e10f","Type":"ContainerStarted","Data":"07c368dd9f6f77959eb11530bb57f922eac7e01f2c46f82a8ce212799e5ea8e4"} Nov 24 13:38:35 crc kubenswrapper[5039]: I1124 13:38:35.295637 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"a5a69761-eccd-49e6-8749-86142600d287","Type":"ContainerStarted","Data":"ca93451c07be0a3fbac912578b9e97f4c3aa8913e1484c80bcbea97175820a44"} Nov 24 13:38:35 crc kubenswrapper[5039]: I1124 13:38:35.299646 5039 generic.go:334] "Generic (PLEG): container finished" podID="4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106" containerID="b8f605a478fb6189b8fa4ba6569c3b3a86f7cdd6495c4d8a092d8da38b4363fc" exitCode=0 Nov 24 13:38:35 crc kubenswrapper[5039]: I1124 13:38:35.299707 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" event={"ID":"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106","Type":"ContainerDied","Data":"b8f605a478fb6189b8fa4ba6569c3b3a86f7cdd6495c4d8a092d8da38b4363fc"} Nov 24 13:38:35 crc kubenswrapper[5039]: I1124 13:38:35.302750 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2cfx8" event={"ID":"cfe8f618-f843-4051-9491-cb3d06e1a1bc","Type":"ContainerStarted","Data":"db231aa884e7a2576599b09ae90df166c3695728a75dde151a2e448f4035c18c"} Nov 24 13:38:35 crc kubenswrapper[5039]: I1124 13:38:35.305722 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60","Type":"ContainerStarted","Data":"c54a6808ce60d5b17b8c478aa932d235816ea8f6182d0f860b99c79d9cef51af"} Nov 24 13:38:35 crc kubenswrapper[5039]: I1124 13:38:35.307996 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"3cef0b8e-d050-4055-a798-31b108727299","Type":"ContainerStarted","Data":"4ba1ba24e8878266600191d0433d3fc69e2bba9dc5a09e326f0932ef161cf8a0"} Nov 24 13:38:35 crc kubenswrapper[5039]: I1124 13:38:35.308062 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 24 13:38:35 crc kubenswrapper[5039]: I1124 13:38:35.325755 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5dqj9" event={"ID":"4fc86906-5a7c-4bfe-8d23-1c98a8711a4a","Type":"ContainerStarted","Data":"4ae2759e2a1c21c8f332631238c425d97963592225a6489655955354114e0005"} Nov 24 13:38:35 crc kubenswrapper[5039]: I1124 13:38:35.325837 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-5dqj9" Nov 24 13:38:35 crc kubenswrapper[5039]: I1124 13:38:35.332187 5039 generic.go:334] "Generic (PLEG): container finished" 
podID="d135c416-c117-4ff6-812c-3d02e07ebbd4" containerID="037c58e61be2cb4e2e27f0ac160a0764e8cdfdeaeab90a5b256592f72caaf730" exitCode=0 Nov 24 13:38:35 crc kubenswrapper[5039]: I1124 13:38:35.332243 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d" event={"ID":"d135c416-c117-4ff6-812c-3d02e07ebbd4","Type":"ContainerDied","Data":"037c58e61be2cb4e2e27f0ac160a0764e8cdfdeaeab90a5b256592f72caaf730"} Nov 24 13:38:35 crc kubenswrapper[5039]: I1124 13:38:35.382111 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=53.824831796 podStartE2EDuration="1m5.382090896s" podCreationTimestamp="2025-11-24 13:37:30 +0000 UTC" firstStartedPulling="2025-11-24 13:38:01.695550213 +0000 UTC m=+1194.134674703" lastFinishedPulling="2025-11-24 13:38:13.252809293 +0000 UTC m=+1205.691933803" observedRunningTime="2025-11-24 13:38:35.371054667 +0000 UTC m=+1227.810179167" watchObservedRunningTime="2025-11-24 13:38:35.382090896 +0000 UTC m=+1227.821215396" Nov 24 13:38:35 crc kubenswrapper[5039]: I1124 13:38:35.400246 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-5dqj9" podStartSLOduration=30.424953507 podStartE2EDuration="57.400231079s" podCreationTimestamp="2025-11-24 13:37:38 +0000 UTC" firstStartedPulling="2025-11-24 13:38:01.675159696 +0000 UTC m=+1194.114284196" lastFinishedPulling="2025-11-24 13:38:28.650437268 +0000 UTC m=+1221.089561768" observedRunningTime="2025-11-24 13:38:35.396239391 +0000 UTC m=+1227.835363901" watchObservedRunningTime="2025-11-24 13:38:35.400231079 +0000 UTC m=+1227.839355579" Nov 24 13:38:35 crc kubenswrapper[5039]: I1124 13:38:35.461863 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-s2gqw" podStartSLOduration=43.941264368 podStartE2EDuration="1m1.461839961s" podCreationTimestamp="2025-11-24 13:37:34 +0000 UTC" firstStartedPulling="2025-11-24 13:38:01.683374237 +0000 UTC m=+1194.122498737" lastFinishedPulling="2025-11-24 13:38:19.20394982 +0000 UTC m=+1211.643074330" observedRunningTime="2025-11-24 13:38:35.420863611 +0000 UTC m=+1227.859988131" watchObservedRunningTime="2025-11-24 13:38:35.461839961 +0000 UTC m=+1227.900964461" Nov 24 13:38:36 crc kubenswrapper[5039]: I1124 13:38:36.340910 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"8e2e73c0-db1d-45e0-b056-0ed13bdbb904","Type":"ContainerStarted","Data":"6e40255f7df394711b21c74b237ac11944fc65febdf994a8a02b4cffa191e21c"} Nov 24 13:38:36 crc kubenswrapper[5039]: I1124 13:38:36.343427 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"869a1d3b-808b-4a44-b300-c2fb36a07e8a","Type":"ContainerStarted","Data":"dade2c8a2b5700d9f5635d510ff78ad8e50a7ecd29fb2bd3c7e68e7df9a834ff"} Nov 24 13:38:36 crc kubenswrapper[5039]: I1124 13:38:36.344727 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"6808fd4e-3718-430c-87e8-ca3e801a8248","Type":"ContainerStarted","Data":"ea9ea3aa66fddb386be5ad5b6935a74401ac939b2d26eb0fc82ed0db374a0398"} Nov 24 13:38:36 crc kubenswrapper[5039]: I1124 13:38:36.348607 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" 
event={"ID":"a5a69761-eccd-49e6-8749-86142600d287","Type":"ContainerStarted","Data":"d78bd5deb59af09bb2005816d770462411cfcff231a7697712161d7b8c378eba"} Nov 24 13:38:36 crc kubenswrapper[5039]: I1124 13:38:36.418999 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=38.615269384 podStartE2EDuration="57.41898221s" podCreationTimestamp="2025-11-24 13:37:39 +0000 UTC" firstStartedPulling="2025-11-24 13:38:03.068391584 +0000 UTC m=+1195.507516124" lastFinishedPulling="2025-11-24 13:38:21.87210441 +0000 UTC m=+1214.311228950" observedRunningTime="2025-11-24 13:38:36.414073461 +0000 UTC m=+1228.853197981" watchObservedRunningTime="2025-11-24 13:38:36.41898221 +0000 UTC m=+1228.858106710" Nov 24 13:38:37 crc kubenswrapper[5039]: I1124 13:38:37.360165 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" event={"ID":"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106","Type":"ContainerStarted","Data":"b8e95f9a5417566542103b2197fcfac0a68c758f4f8e78e15a6d25cba8b0eb0b"} Nov 24 13:38:37 crc kubenswrapper[5039]: I1124 13:38:37.360782 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" Nov 24 13:38:37 crc kubenswrapper[5039]: I1124 13:38:37.362321 5039 generic.go:334] "Generic (PLEG): container finished" podID="cfe8f618-f843-4051-9491-cb3d06e1a1bc" containerID="db231aa884e7a2576599b09ae90df166c3695728a75dde151a2e448f4035c18c" exitCode=0 Nov 24 13:38:37 crc kubenswrapper[5039]: I1124 13:38:37.362458 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2cfx8" event={"ID":"cfe8f618-f843-4051-9491-cb3d06e1a1bc","Type":"ContainerDied","Data":"db231aa884e7a2576599b09ae90df166c3695728a75dde151a2e448f4035c18c"} Nov 24 13:38:37 crc kubenswrapper[5039]: I1124 13:38:37.364154 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9","Type":"ContainerStarted","Data":"b3221e70bc8304fb764f8c5ad616877efdf1b7afa4bc16f34d3ce5e778e8d5cf"} Nov 24 13:38:37 crc kubenswrapper[5039]: I1124 13:38:37.370290 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d" event={"ID":"d135c416-c117-4ff6-812c-3d02e07ebbd4","Type":"ContainerStarted","Data":"5e25b3e11b626183d77095c5053e1ab3ba8fd6de9a8933e11c0d9725cb39f7e2"} Nov 24 13:38:37 crc kubenswrapper[5039]: I1124 13:38:37.388935 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" podStartSLOduration=5.40709248 podStartE2EDuration="1m11.388915352s" podCreationTimestamp="2025-11-24 13:37:26 +0000 UTC" firstStartedPulling="2025-11-24 13:37:27.074164958 +0000 UTC m=+1159.513289448" lastFinishedPulling="2025-11-24 13:38:33.05598782 +0000 UTC m=+1225.495112320" observedRunningTime="2025-11-24 13:38:37.383296895 +0000 UTC m=+1229.822421395" watchObservedRunningTime="2025-11-24 13:38:37.388915352 +0000 UTC m=+1229.828039852" Nov 24 13:38:37 crc kubenswrapper[5039]: I1124 13:38:37.411163 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=28.225921096 podStartE2EDuration="1m2.411143234s" podCreationTimestamp="2025-11-24 13:37:35 +0000 UTC" firstStartedPulling="2025-11-24 13:38:02.354909408 +0000 UTC m=+1194.794033908" lastFinishedPulling="2025-11-24 13:38:36.540131546 +0000 UTC m=+1228.979256046" observedRunningTime="2025-11-24 
13:38:37.407054485 +0000 UTC m=+1229.846178995" watchObservedRunningTime="2025-11-24 13:38:37.411143234 +0000 UTC m=+1229.850267744" Nov 24 13:38:37 crc kubenswrapper[5039]: I1124 13:38:37.478066 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d" podStartSLOduration=5.593615117 podStartE2EDuration="1m11.478027186s" podCreationTimestamp="2025-11-24 13:37:26 +0000 UTC" firstStartedPulling="2025-11-24 13:37:27.390295607 +0000 UTC m=+1159.829420107" lastFinishedPulling="2025-11-24 13:38:33.274707626 +0000 UTC m=+1225.713832176" observedRunningTime="2025-11-24 13:38:37.477439421 +0000 UTC m=+1229.916563921" watchObservedRunningTime="2025-11-24 13:38:37.478027186 +0000 UTC m=+1229.917151686" Nov 24 13:38:38 crc kubenswrapper[5039]: I1124 13:38:38.221269 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 24 13:38:38 crc kubenswrapper[5039]: I1124 13:38:38.289197 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 24 13:38:38 crc kubenswrapper[5039]: I1124 13:38:38.381802 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2cfx8" event={"ID":"cfe8f618-f843-4051-9491-cb3d06e1a1bc","Type":"ContainerStarted","Data":"e7181cc05b182f8fbe7bdd09c5e812e4b15c327fbaa5b2019fa76aef64e332c6"} Nov 24 13:38:38 crc kubenswrapper[5039]: I1124 13:38:38.381852 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2cfx8" event={"ID":"cfe8f618-f843-4051-9491-cb3d06e1a1bc","Type":"ContainerStarted","Data":"a352eba2e72bcdb41eb9ed11f5a2a28176ce8ef8abfb1f7d615db3847726a235"} Nov 24 13:38:38 crc kubenswrapper[5039]: I1124 13:38:38.382146 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 24 13:38:38 crc kubenswrapper[5039]: I1124 13:38:38.404240 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-2cfx8" podStartSLOduration=44.458395568 podStartE2EDuration="1m0.404225567s" podCreationTimestamp="2025-11-24 13:37:38 +0000 UTC" firstStartedPulling="2025-11-24 13:38:05.926134018 +0000 UTC m=+1198.365258518" lastFinishedPulling="2025-11-24 13:38:21.871963997 +0000 UTC m=+1214.311088517" observedRunningTime="2025-11-24 13:38:38.402049995 +0000 UTC m=+1230.841174505" watchObservedRunningTime="2025-11-24 13:38:38.404225567 +0000 UTC m=+1230.843350057" Nov 24 13:38:38 crc kubenswrapper[5039]: I1124 13:38:38.535213 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-2cfx8" Nov 24 13:38:38 crc kubenswrapper[5039]: I1124 13:38:38.535845 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-2cfx8" Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.436429 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.711067 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-k7d8d"] Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.711516 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d" podUID="d135c416-c117-4ff6-812c-3d02e07ebbd4" containerName="dnsmasq-dns" containerID="cri-o://5e25b3e11b626183d77095c5053e1ab3ba8fd6de9a8933e11c0d9725cb39f7e2" gracePeriod=10 
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.711635 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.728360 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.759742 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-pccxz"]
Nov 24 13:38:39 crc kubenswrapper[5039]: E1124 13:38:39.760122 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1d34075-ef09-4cf1-8c85-3875cac010ea" containerName="console"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.760134 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1d34075-ef09-4cf1-8c85-3875cac010ea" containerName="console"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.760329 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1d34075-ef09-4cf1-8c85-3875cac010ea" containerName="console"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.761295 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-pccxz"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.769602 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.783619 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-wb4sk"]
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.784763 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-wb4sk"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.788175 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.803590 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-pccxz"]
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.816265 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-wb4sk"]
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.822927 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.843198 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/6e986f2a-8ff1-4efb-aff0-7e294c0845bf-ovs-rundir\") pod \"ovn-controller-metrics-wb4sk\" (UID: \"6e986f2a-8ff1-4efb-aff0-7e294c0845bf\") " pod="openstack/ovn-controller-metrics-wb4sk"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.843357 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e986f2a-8ff1-4efb-aff0-7e294c0845bf-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wb4sk\" (UID: \"6e986f2a-8ff1-4efb-aff0-7e294c0845bf\") " pod="openstack/ovn-controller-metrics-wb4sk"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.843398 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/6e986f2a-8ff1-4efb-aff0-7e294c0845bf-ovn-rundir\") pod \"ovn-controller-metrics-wb4sk\" (UID: \"6e986f2a-8ff1-4efb-aff0-7e294c0845bf\") " pod="openstack/ovn-controller-metrics-wb4sk"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.843462 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/700d8512-7d20-4527-ac2f-16bfd6ad1010-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-pccxz\" (UID: \"700d8512-7d20-4527-ac2f-16bfd6ad1010\") " pod="openstack/dnsmasq-dns-7f896c8c65-pccxz"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.843536 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/700d8512-7d20-4527-ac2f-16bfd6ad1010-config\") pod \"dnsmasq-dns-7f896c8c65-pccxz\" (UID: \"700d8512-7d20-4527-ac2f-16bfd6ad1010\") " pod="openstack/dnsmasq-dns-7f896c8c65-pccxz"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.843788 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e986f2a-8ff1-4efb-aff0-7e294c0845bf-combined-ca-bundle\") pod \"ovn-controller-metrics-wb4sk\" (UID: \"6e986f2a-8ff1-4efb-aff0-7e294c0845bf\") " pod="openstack/ovn-controller-metrics-wb4sk"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.843830 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s57kk\" (UniqueName: \"kubernetes.io/projected/6e986f2a-8ff1-4efb-aff0-7e294c0845bf-kube-api-access-s57kk\") pod \"ovn-controller-metrics-wb4sk\" (UID: \"6e986f2a-8ff1-4efb-aff0-7e294c0845bf\") " pod="openstack/ovn-controller-metrics-wb4sk"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.843896 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e986f2a-8ff1-4efb-aff0-7e294c0845bf-config\") pod \"ovn-controller-metrics-wb4sk\" (UID: \"6e986f2a-8ff1-4efb-aff0-7e294c0845bf\") " pod="openstack/ovn-controller-metrics-wb4sk"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.843975 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/700d8512-7d20-4527-ac2f-16bfd6ad1010-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-pccxz\" (UID: \"700d8512-7d20-4527-ac2f-16bfd6ad1010\") " pod="openstack/dnsmasq-dns-7f896c8c65-pccxz"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.844043 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5g827\" (UniqueName: \"kubernetes.io/projected/700d8512-7d20-4527-ac2f-16bfd6ad1010-kube-api-access-5g827\") pod \"dnsmasq-dns-7f896c8c65-pccxz\" (UID: \"700d8512-7d20-4527-ac2f-16bfd6ad1010\") " pod="openstack/dnsmasq-dns-7f896c8c65-pccxz"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.945406 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e986f2a-8ff1-4efb-aff0-7e294c0845bf-combined-ca-bundle\") pod \"ovn-controller-metrics-wb4sk\" (UID: \"6e986f2a-8ff1-4efb-aff0-7e294c0845bf\") " pod="openstack/ovn-controller-metrics-wb4sk"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.945452 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s57kk\" (UniqueName: \"kubernetes.io/projected/6e986f2a-8ff1-4efb-aff0-7e294c0845bf-kube-api-access-s57kk\") pod \"ovn-controller-metrics-wb4sk\" (UID: \"6e986f2a-8ff1-4efb-aff0-7e294c0845bf\") " pod="openstack/ovn-controller-metrics-wb4sk"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.945480 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e986f2a-8ff1-4efb-aff0-7e294c0845bf-config\") pod \"ovn-controller-metrics-wb4sk\" (UID: \"6e986f2a-8ff1-4efb-aff0-7e294c0845bf\") " pod="openstack/ovn-controller-metrics-wb4sk"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.945526 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/700d8512-7d20-4527-ac2f-16bfd6ad1010-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-pccxz\" (UID: \"700d8512-7d20-4527-ac2f-16bfd6ad1010\") " pod="openstack/dnsmasq-dns-7f896c8c65-pccxz"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.945551 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5g827\" (UniqueName: \"kubernetes.io/projected/700d8512-7d20-4527-ac2f-16bfd6ad1010-kube-api-access-5g827\") pod \"dnsmasq-dns-7f896c8c65-pccxz\" (UID: \"700d8512-7d20-4527-ac2f-16bfd6ad1010\") " pod="openstack/dnsmasq-dns-7f896c8c65-pccxz"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.945584 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/6e986f2a-8ff1-4efb-aff0-7e294c0845bf-ovs-rundir\") pod \"ovn-controller-metrics-wb4sk\" (UID: \"6e986f2a-8ff1-4efb-aff0-7e294c0845bf\") " pod="openstack/ovn-controller-metrics-wb4sk"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.945611 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e986f2a-8ff1-4efb-aff0-7e294c0845bf-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wb4sk\" (UID: \"6e986f2a-8ff1-4efb-aff0-7e294c0845bf\") " pod="openstack/ovn-controller-metrics-wb4sk"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.945630 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/6e986f2a-8ff1-4efb-aff0-7e294c0845bf-ovn-rundir\") pod \"ovn-controller-metrics-wb4sk\" (UID: \"6e986f2a-8ff1-4efb-aff0-7e294c0845bf\") " pod="openstack/ovn-controller-metrics-wb4sk"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.945646 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/700d8512-7d20-4527-ac2f-16bfd6ad1010-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-pccxz\" (UID: \"700d8512-7d20-4527-ac2f-16bfd6ad1010\") " pod="openstack/dnsmasq-dns-7f896c8c65-pccxz"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.945670 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/700d8512-7d20-4527-ac2f-16bfd6ad1010-config\") pod \"dnsmasq-dns-7f896c8c65-pccxz\" (UID: \"700d8512-7d20-4527-ac2f-16bfd6ad1010\") " pod="openstack/dnsmasq-dns-7f896c8c65-pccxz"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.946816 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/700d8512-7d20-4527-ac2f-16bfd6ad1010-config\") pod \"dnsmasq-dns-7f896c8c65-pccxz\" (UID: \"700d8512-7d20-4527-ac2f-16bfd6ad1010\") " pod="openstack/dnsmasq-dns-7f896c8c65-pccxz"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.946996 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/6e986f2a-8ff1-4efb-aff0-7e294c0845bf-ovn-rundir\") pod \"ovn-controller-metrics-wb4sk\" (UID: \"6e986f2a-8ff1-4efb-aff0-7e294c0845bf\") " pod="openstack/ovn-controller-metrics-wb4sk"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.947609 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/700d8512-7d20-4527-ac2f-16bfd6ad1010-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-pccxz\" (UID: \"700d8512-7d20-4527-ac2f-16bfd6ad1010\") " pod="openstack/dnsmasq-dns-7f896c8c65-pccxz"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.947696 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e986f2a-8ff1-4efb-aff0-7e294c0845bf-config\") pod \"ovn-controller-metrics-wb4sk\" (UID: \"6e986f2a-8ff1-4efb-aff0-7e294c0845bf\") " pod="openstack/ovn-controller-metrics-wb4sk"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.947887 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/6e986f2a-8ff1-4efb-aff0-7e294c0845bf-ovs-rundir\") pod \"ovn-controller-metrics-wb4sk\" (UID: \"6e986f2a-8ff1-4efb-aff0-7e294c0845bf\") " pod="openstack/ovn-controller-metrics-wb4sk"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.948438 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/700d8512-7d20-4527-ac2f-16bfd6ad1010-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-pccxz\" (UID: \"700d8512-7d20-4527-ac2f-16bfd6ad1010\") " pod="openstack/dnsmasq-dns-7f896c8c65-pccxz"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.951852 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6e986f2a-8ff1-4efb-aff0-7e294c0845bf-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wb4sk\" (UID: \"6e986f2a-8ff1-4efb-aff0-7e294c0845bf\") " pod="openstack/ovn-controller-metrics-wb4sk"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.965681 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e986f2a-8ff1-4efb-aff0-7e294c0845bf-combined-ca-bundle\") pod \"ovn-controller-metrics-wb4sk\" (UID: \"6e986f2a-8ff1-4efb-aff0-7e294c0845bf\") " pod="openstack/ovn-controller-metrics-wb4sk"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.968911 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s57kk\" (UniqueName: \"kubernetes.io/projected/6e986f2a-8ff1-4efb-aff0-7e294c0845bf-kube-api-access-s57kk\") pod \"ovn-controller-metrics-wb4sk\" (UID: \"6e986f2a-8ff1-4efb-aff0-7e294c0845bf\") " pod="openstack/ovn-controller-metrics-wb4sk"
Nov 24 13:38:39 crc kubenswrapper[5039]: I1124 13:38:39.972142 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5g827\" (UniqueName: \"kubernetes.io/projected/700d8512-7d20-4527-ac2f-16bfd6ad1010-kube-api-access-5g827\") pod \"dnsmasq-dns-7f896c8c65-pccxz\" (UID: \"700d8512-7d20-4527-ac2f-16bfd6ad1010\") " pod="openstack/dnsmasq-dns-7f896c8c65-pccxz"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.089974 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-kxs9n"]
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.090320 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" podUID="4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106" containerName="dnsmasq-dns" containerID="cri-o://b8e95f9a5417566542103b2197fcfac0a68c758f4f8e78e15a6d25cba8b0eb0b" gracePeriod=10
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.098352 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-pccxz"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.116918 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-wb4sk"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.136362 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-vml7s"]
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.138025 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-vml7s"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.157013 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.191136 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-vml7s"]
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.270693 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8r26\" (UniqueName: \"kubernetes.io/projected/ab1f8974-d270-4772-b638-019df2da6954-kube-api-access-z8r26\") pod \"dnsmasq-dns-86db49b7ff-vml7s\" (UID: \"ab1f8974-d270-4772-b638-019df2da6954\") " pod="openstack/dnsmasq-dns-86db49b7ff-vml7s"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.270769 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-vml7s\" (UID: \"ab1f8974-d270-4772-b638-019df2da6954\") " pod="openstack/dnsmasq-dns-86db49b7ff-vml7s"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.270863 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-config\") pod \"dnsmasq-dns-86db49b7ff-vml7s\" (UID: \"ab1f8974-d270-4772-b638-019df2da6954\") " pod="openstack/dnsmasq-dns-86db49b7ff-vml7s"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.271058 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-vml7s\" (UID: \"ab1f8974-d270-4772-b638-019df2da6954\") " pod="openstack/dnsmasq-dns-86db49b7ff-vml7s"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.271165 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-vml7s\" (UID: \"ab1f8974-d270-4772-b638-019df2da6954\") " pod="openstack/dnsmasq-dns-86db49b7ff-vml7s"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.373029 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-config\") pod \"dnsmasq-dns-86db49b7ff-vml7s\" (UID: \"ab1f8974-d270-4772-b638-019df2da6954\") " pod="openstack/dnsmasq-dns-86db49b7ff-vml7s"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.373265 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-vml7s\" (UID: \"ab1f8974-d270-4772-b638-019df2da6954\") " pod="openstack/dnsmasq-dns-86db49b7ff-vml7s"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.373379 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-vml7s\" (UID: \"ab1f8974-d270-4772-b638-019df2da6954\") " pod="openstack/dnsmasq-dns-86db49b7ff-vml7s"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.373628 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8r26\" (UniqueName: \"kubernetes.io/projected/ab1f8974-d270-4772-b638-019df2da6954-kube-api-access-z8r26\") pod \"dnsmasq-dns-86db49b7ff-vml7s\" (UID: \"ab1f8974-d270-4772-b638-019df2da6954\") " pod="openstack/dnsmasq-dns-86db49b7ff-vml7s"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.373704 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-vml7s\" (UID: \"ab1f8974-d270-4772-b638-019df2da6954\") " pod="openstack/dnsmasq-dns-86db49b7ff-vml7s"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.433261 5039 generic.go:334] "Generic (PLEG): container finished" podID="d135c416-c117-4ff6-812c-3d02e07ebbd4" containerID="5e25b3e11b626183d77095c5053e1ab3ba8fd6de9a8933e11c0d9725cb39f7e2" exitCode=0
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.448203 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.448349 5039 generic.go:334] "Generic (PLEG): container finished" podID="4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106" containerID="b8e95f9a5417566542103b2197fcfac0a68c758f4f8e78e15a6d25cba8b0eb0b" exitCode=0
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.465715 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-vml7s\" (UID: \"ab1f8974-d270-4772-b638-019df2da6954\") " pod="openstack/dnsmasq-dns-86db49b7ff-vml7s"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.465929 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d" event={"ID":"d135c416-c117-4ff6-812c-3d02e07ebbd4","Type":"ContainerDied","Data":"5e25b3e11b626183d77095c5053e1ab3ba8fd6de9a8933e11c0d9725cb39f7e2"}
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.465986 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" event={"ID":"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106","Type":"ContainerDied","Data":"b8e95f9a5417566542103b2197fcfac0a68c758f4f8e78e15a6d25cba8b0eb0b"}
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.466023 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.466062 5039 scope.go:117] "RemoveContainer" containerID="5e25b3e11b626183d77095c5053e1ab3ba8fd6de9a8933e11c0d9725cb39f7e2"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.491294 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-vml7s\" (UID: \"ab1f8974-d270-4772-b638-019df2da6954\") " pod="openstack/dnsmasq-dns-86db49b7ff-vml7s"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.494862 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8r26\" (UniqueName: \"kubernetes.io/projected/ab1f8974-d270-4772-b638-019df2da6954-kube-api-access-z8r26\") pod \"dnsmasq-dns-86db49b7ff-vml7s\" (UID: \"ab1f8974-d270-4772-b638-019df2da6954\") " pod="openstack/dnsmasq-dns-86db49b7ff-vml7s"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.500125 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-vml7s\" (UID: \"ab1f8974-d270-4772-b638-019df2da6954\") " pod="openstack/dnsmasq-dns-86db49b7ff-vml7s"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.500500 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-config\") pod \"dnsmasq-dns-86db49b7ff-vml7s\" (UID: \"ab1f8974-d270-4772-b638-019df2da6954\") " pod="openstack/dnsmasq-dns-86db49b7ff-vml7s"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.583885 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d135c416-c117-4ff6-812c-3d02e07ebbd4-dns-svc\") pod \"d135c416-c117-4ff6-812c-3d02e07ebbd4\" (UID: \"d135c416-c117-4ff6-812c-3d02e07ebbd4\") "
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.583993 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d135c416-c117-4ff6-812c-3d02e07ebbd4-config\") pod \"d135c416-c117-4ff6-812c-3d02e07ebbd4\" (UID: \"d135c416-c117-4ff6-812c-3d02e07ebbd4\") "
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.584041 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t94d4\" (UniqueName: \"kubernetes.io/projected/d135c416-c117-4ff6-812c-3d02e07ebbd4-kube-api-access-t94d4\") pod \"d135c416-c117-4ff6-812c-3d02e07ebbd4\" (UID: \"d135c416-c117-4ff6-812c-3d02e07ebbd4\") "
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.594873 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d135c416-c117-4ff6-812c-3d02e07ebbd4-kube-api-access-t94d4" (OuterVolumeSpecName: "kube-api-access-t94d4") pod "d135c416-c117-4ff6-812c-3d02e07ebbd4" (UID: "d135c416-c117-4ff6-812c-3d02e07ebbd4"). InnerVolumeSpecName "kube-api-access-t94d4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.639192 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d135c416-c117-4ff6-812c-3d02e07ebbd4-config" (OuterVolumeSpecName: "config") pod "d135c416-c117-4ff6-812c-3d02e07ebbd4" (UID: "d135c416-c117-4ff6-812c-3d02e07ebbd4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.652683 5039 scope.go:117] "RemoveContainer" containerID="037c58e61be2cb4e2e27f0ac160a0764e8cdfdeaeab90a5b256592f72caaf730"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.669388 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d135c416-c117-4ff6-812c-3d02e07ebbd4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d135c416-c117-4ff6-812c-3d02e07ebbd4" (UID: "d135c416-c117-4ff6-812c-3d02e07ebbd4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.689015 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d135c416-c117-4ff6-812c-3d02e07ebbd4-config\") on node \"crc\" DevicePath \"\""
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.689049 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t94d4\" (UniqueName: \"kubernetes.io/projected/d135c416-c117-4ff6-812c-3d02e07ebbd4-kube-api-access-t94d4\") on node \"crc\" DevicePath \"\""
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.689061 5039 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d135c416-c117-4ff6-812c-3d02e07ebbd4-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.711170 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-kxs9n"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.728786 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-pccxz"]
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.767342 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-vml7s"
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.891839 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-28w9k\" (UniqueName: \"kubernetes.io/projected/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106-kube-api-access-28w9k\") pod \"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106\" (UID: \"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106\") "
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.892221 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106-config\") pod \"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106\" (UID: \"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106\") "
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.892294 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106-dns-svc\") pod \"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106\" (UID: \"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106\") "
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.898968 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106-kube-api-access-28w9k" (OuterVolumeSpecName: "kube-api-access-28w9k") pod "4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106" (UID: "4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106"). InnerVolumeSpecName "kube-api-access-28w9k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.924478 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-wb4sk"]
Nov 24 13:38:40 crc kubenswrapper[5039]: W1124 13:38:40.953032 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6e986f2a_8ff1_4efb_aff0_7e294c0845bf.slice/crio-43db052bf86b861a8b0f24dc0f4f47bd84eebd968eeb92a0d142185a51427c99 WatchSource:0}: Error finding container 43db052bf86b861a8b0f24dc0f4f47bd84eebd968eeb92a0d142185a51427c99: Status 404 returned error can't find the container with id 43db052bf86b861a8b0f24dc0f4f47bd84eebd968eeb92a0d142185a51427c99
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.982564 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106-config" (OuterVolumeSpecName: "config") pod "4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106" (UID: "4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.994581 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106" (UID: "4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.994797 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106-dns-svc\") pod \"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106\" (UID: \"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106\") "
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.995622 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-28w9k\" (UniqueName: \"kubernetes.io/projected/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106-kube-api-access-28w9k\") on node \"crc\" DevicePath \"\""
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.995645 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106-config\") on node \"crc\" DevicePath \"\""
Nov 24 13:38:40 crc kubenswrapper[5039]: W1124 13:38:40.995719 5039 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106/volumes/kubernetes.io~configmap/dns-svc
Nov 24 13:38:40 crc kubenswrapper[5039]: I1124 13:38:40.995730 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106" (UID: "4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.100849 5039 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.127682 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0"
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.276105 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-vml7s"]
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.458598 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-pccxz" event={"ID":"700d8512-7d20-4527-ac2f-16bfd6ad1010","Type":"ContainerStarted","Data":"864e31ec8b1decfc2ab85c7bebdfd6bec4a353454abe69e3b038ba5d5e6b9b0a"}
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.458643 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-pccxz" event={"ID":"700d8512-7d20-4527-ac2f-16bfd6ad1010","Type":"ContainerStarted","Data":"6e61e5ece10258b30f95851e8b19040814e2d1d08ccf5572d92328be7151e53b"}
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.459544 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d" event={"ID":"d135c416-c117-4ff6-812c-3d02e07ebbd4","Type":"ContainerDied","Data":"ddbd66daef8dac28b26dd26a3b51820a5ab6732c2781e28ca3e24edef41873ea"}
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.459612 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-k7d8d"
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.464177 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-kxs9n" event={"ID":"4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106","Type":"ContainerDied","Data":"35c9eb0aca9c89b1dfa30629f3ea8db8cfec1cd073a60464f6fc04c769424b78"}
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.464227 5039 scope.go:117] "RemoveContainer" containerID="b8e95f9a5417566542103b2197fcfac0a68c758f4f8e78e15a6d25cba8b0eb0b"
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.464225 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-kxs9n"
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.482275 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-wb4sk" event={"ID":"6e986f2a-8ff1-4efb-aff0-7e294c0845bf","Type":"ContainerStarted","Data":"580575b15df97b960ae320d8b30a2210c0bea38171bccc0190833b8d58f06753"}
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.482316 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-wb4sk" event={"ID":"6e986f2a-8ff1-4efb-aff0-7e294c0845bf","Type":"ContainerStarted","Data":"43db052bf86b861a8b0f24dc0f4f47bd84eebd968eeb92a0d142185a51427c99"}
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.484904 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-vml7s" event={"ID":"ab1f8974-d270-4772-b638-019df2da6954","Type":"ContainerStarted","Data":"5d113230aaaab4e1ade1991bedebb226da521ab03b5d4b4df00b32c1f9b70adb"}
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.502212 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-wb4sk" podStartSLOduration=2.5021934789999998 podStartE2EDuration="2.502193479s" podCreationTimestamp="2025-11-24 13:38:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:38:41.499031682 +0000 UTC m=+1233.938156192" watchObservedRunningTime="2025-11-24 13:38:41.502193479 +0000 UTC m=+1233.941317979"
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.593321 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-kxs9n"]
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.593432 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0"
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.603177 5039 scope.go:117] "RemoveContainer" containerID="b8f605a478fb6189b8fa4ba6569c3b3a86f7cdd6495c4d8a092d8da38b4363fc"
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.608323 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-kxs9n"]
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.624913 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-k7d8d"]
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.637894 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-k7d8d"]
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.902150 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"]
Nov 24 13:38:41 crc kubenswrapper[5039]: E1124 13:38:41.905116 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106" containerName="init"
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.905154 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106" containerName="init"
Nov 24 13:38:41 crc kubenswrapper[5039]: E1124 13:38:41.905177 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106" containerName="dnsmasq-dns"
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.905186 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106" containerName="dnsmasq-dns"
Nov 24 13:38:41 crc kubenswrapper[5039]: E1124 13:38:41.905200 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d135c416-c117-4ff6-812c-3d02e07ebbd4" containerName="dnsmasq-dns"
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.905209 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="d135c416-c117-4ff6-812c-3d02e07ebbd4" containerName="dnsmasq-dns"
Nov 24 13:38:41 crc kubenswrapper[5039]: E1124 13:38:41.905221 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d135c416-c117-4ff6-812c-3d02e07ebbd4" containerName="init"
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.905229 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="d135c416-c117-4ff6-812c-3d02e07ebbd4" containerName="init"
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.905481 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106" containerName="dnsmasq-dns"
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.905526 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="d135c416-c117-4ff6-812c-3d02e07ebbd4" containerName="dnsmasq-dns"
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.913774 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.913900 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.917797 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config"
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.919466 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts"
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.919723 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-8s94x"
Nov 24 13:38:41 crc kubenswrapper[5039]: I1124 13:38:41.919789 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.021013 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9523b0b0-e489-4eb8-8954-83dd766373df-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.021067 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9523b0b0-e489-4eb8-8954-83dd766373df-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.021224 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9523b0b0-e489-4eb8-8954-83dd766373df-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.021391 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9523b0b0-e489-4eb8-8954-83dd766373df-scripts\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.021465 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n8v24\" (UniqueName: \"kubernetes.io/projected/9523b0b0-e489-4eb8-8954-83dd766373df-kube-api-access-n8v24\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.021751 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/9523b0b0-e489-4eb8-8954-83dd766373df-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.021882 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9523b0b0-e489-4eb8-8954-83dd766373df-config\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.123878 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9523b0b0-e489-4eb8-8954-83dd766373df-scripts\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.123942 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n8v24\" (UniqueName: \"kubernetes.io/projected/9523b0b0-e489-4eb8-8954-83dd766373df-kube-api-access-n8v24\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.124015 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/9523b0b0-e489-4eb8-8954-83dd766373df-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.124069 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9523b0b0-e489-4eb8-8954-83dd766373df-config\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.124097 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9523b0b0-e489-4eb8-8954-83dd766373df-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.124128 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9523b0b0-e489-4eb8-8954-83dd766373df-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.124178 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9523b0b0-e489-4eb8-8954-83dd766373df-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.125085 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9523b0b0-e489-4eb8-8954-83dd766373df-config\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.125096 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9523b0b0-e489-4eb8-8954-83dd766373df-scripts\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.125535 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9523b0b0-e489-4eb8-8954-83dd766373df-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.132253 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9523b0b0-e489-4eb8-8954-83dd766373df-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.132830 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/9523b0b0-e489-4eb8-8954-83dd766373df-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.133553 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9523b0b0-e489-4eb8-8954-83dd766373df-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.147080 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n8v24\" (UniqueName: \"kubernetes.io/projected/9523b0b0-e489-4eb8-8954-83dd766373df-kube-api-access-n8v24\") pod \"ovn-northd-0\" (UID: \"9523b0b0-e489-4eb8-8954-83dd766373df\") " pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.237815 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.333373 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106" path="/var/lib/kubelet/pods/4da0a4e8-d6eb-4e5d-bf79-3d1ea106c106/volumes"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.334073 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d135c416-c117-4ff6-812c-3d02e07ebbd4" path="/var/lib/kubelet/pods/d135c416-c117-4ff6-812c-3d02e07ebbd4/volumes"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.497954 5039 generic.go:334] "Generic (PLEG): container finished" podID="ab1f8974-d270-4772-b638-019df2da6954" containerID="a3f1ea8d0163751ad991897045a02e7e43879e23485e8d980e75b45aef827608" exitCode=0
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.498011 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-vml7s" event={"ID":"ab1f8974-d270-4772-b638-019df2da6954","Type":"ContainerDied","Data":"a3f1ea8d0163751ad991897045a02e7e43879e23485e8d980e75b45aef827608"}
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.502290 5039 generic.go:334] "Generic (PLEG): container finished" podID="700d8512-7d20-4527-ac2f-16bfd6ad1010" containerID="864e31ec8b1decfc2ab85c7bebdfd6bec4a353454abe69e3b038ba5d5e6b9b0a" exitCode=0
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.502346 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-pccxz" event={"ID":"700d8512-7d20-4527-ac2f-16bfd6ad1010","Type":"ContainerDied","Data":"864e31ec8b1decfc2ab85c7bebdfd6bec4a353454abe69e3b038ba5d5e6b9b0a"}
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.502369 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-pccxz" event={"ID":"700d8512-7d20-4527-ac2f-16bfd6ad1010","Type":"ContainerStarted","Data":"de03cc9e6f804b282b6c7a8e8ddaab61dc6a416d6440592ea13952aac1e331c0"}
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.503710 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7f896c8c65-pccxz"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.562037 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7f896c8c65-pccxz" podStartSLOduration=3.56201496 podStartE2EDuration="3.56201496s" podCreationTimestamp="2025-11-24 13:38:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:38:42.552678121 +0000 UTC m=+1234.991802621" watchObservedRunningTime="2025-11-24 13:38:42.56201496 +0000 UTC m=+1235.001139460"
Nov 24 13:38:42 crc kubenswrapper[5039]: I1124 13:38:42.718775 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Nov 24 13:38:42 crc kubenswrapper[5039]: W1124 13:38:42.725178 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9523b0b0_e489_4eb8_8954_83dd766373df.slice/crio-66292f5bc9c4b2c446b4718d2f83831a882bc0294b60d44ce209f6da10b6a3dc WatchSource:0}: Error finding container 66292f5bc9c4b2c446b4718d2f83831a882bc0294b60d44ce209f6da10b6a3dc: Status 404 returned error can't find the container with id 66292f5bc9c4b2c446b4718d2f83831a882bc0294b60d44ce209f6da10b6a3dc
Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.518473 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"9523b0b0-e489-4eb8-8954-83dd766373df","Type":"ContainerStarted","Data":"66292f5bc9c4b2c446b4718d2f83831a882bc0294b60d44ce209f6da10b6a3dc"}
Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.522208 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-vml7s" event={"ID":"ab1f8974-d270-4772-b638-019df2da6954","Type":"ContainerStarted","Data":"d3b91a683fc96d83dc1eceb97781f6105280bedd756939332f7bd44be0084c7c"}
Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.522454 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-vml7s"
Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.527486 5039 generic.go:334] "Generic (PLEG): container finished" podID="869a1d3b-808b-4a44-b300-c2fb36a07e8a" containerID="dade2c8a2b5700d9f5635d510ff78ad8e50a7ecd29fb2bd3c7e68e7df9a834ff" exitCode=0
Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.527658 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"869a1d3b-808b-4a44-b300-c2fb36a07e8a","Type":"ContainerDied","Data":"dade2c8a2b5700d9f5635d510ff78ad8e50a7ecd29fb2bd3c7e68e7df9a834ff"}
Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.545979 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-vml7s" podStartSLOduration=3.545964518 podStartE2EDuration="3.545964518s" podCreationTimestamp="2025-11-24 13:38:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:38:43.545346752 +0000 UTC m=+1235.984471252" watchObservedRunningTime="2025-11-24 13:38:43.545964518 +0000 UTC m=+1235.985089018"
Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.737747 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-pccxz"]
Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.775619 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-x8p79"]
Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.781772 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-x8p79" Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.815582 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-x8p79"] Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.866366 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4fml\" (UniqueName: \"kubernetes.io/projected/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-kube-api-access-s4fml\") pod \"dnsmasq-dns-698758b865-x8p79\" (UID: \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\") " pod="openstack/dnsmasq-dns-698758b865-x8p79" Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.866491 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-config\") pod \"dnsmasq-dns-698758b865-x8p79\" (UID: \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\") " pod="openstack/dnsmasq-dns-698758b865-x8p79" Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.866545 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-x8p79\" (UID: \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\") " pod="openstack/dnsmasq-dns-698758b865-x8p79" Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.866575 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-dns-svc\") pod \"dnsmasq-dns-698758b865-x8p79\" (UID: \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\") " pod="openstack/dnsmasq-dns-698758b865-x8p79" Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.866594 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-x8p79\" (UID: \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\") " pod="openstack/dnsmasq-dns-698758b865-x8p79" Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.968561 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-config\") pod \"dnsmasq-dns-698758b865-x8p79\" (UID: \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\") " pod="openstack/dnsmasq-dns-698758b865-x8p79" Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.968656 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-x8p79\" (UID: \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\") " pod="openstack/dnsmasq-dns-698758b865-x8p79" Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.968694 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-dns-svc\") pod \"dnsmasq-dns-698758b865-x8p79\" (UID: \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\") " pod="openstack/dnsmasq-dns-698758b865-x8p79" Nov 24 13:38:43 crc kubenswrapper[5039]: 
I1124 13:38:43.968721 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-x8p79\" (UID: \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\") " pod="openstack/dnsmasq-dns-698758b865-x8p79" Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.969135 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4fml\" (UniqueName: \"kubernetes.io/projected/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-kube-api-access-s4fml\") pod \"dnsmasq-dns-698758b865-x8p79\" (UID: \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\") " pod="openstack/dnsmasq-dns-698758b865-x8p79" Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.969525 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-config\") pod \"dnsmasq-dns-698758b865-x8p79\" (UID: \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\") " pod="openstack/dnsmasq-dns-698758b865-x8p79" Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.969774 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-x8p79\" (UID: \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\") " pod="openstack/dnsmasq-dns-698758b865-x8p79" Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.969751 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-x8p79\" (UID: \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\") " pod="openstack/dnsmasq-dns-698758b865-x8p79" Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.969683 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-dns-svc\") pod \"dnsmasq-dns-698758b865-x8p79\" (UID: \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\") " pod="openstack/dnsmasq-dns-698758b865-x8p79" Nov 24 13:38:43 crc kubenswrapper[5039]: I1124 13:38:43.990031 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4fml\" (UniqueName: \"kubernetes.io/projected/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-kube-api-access-s4fml\") pod \"dnsmasq-dns-698758b865-x8p79\" (UID: \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\") " pod="openstack/dnsmasq-dns-698758b865-x8p79" Nov 24 13:38:44 crc kubenswrapper[5039]: I1124 13:38:44.100493 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-x8p79" Nov 24 13:38:44 crc kubenswrapper[5039]: I1124 13:38:44.584528 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-x8p79"] Nov 24 13:38:44 crc kubenswrapper[5039]: W1124 13:38:44.587416 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8bf2c80f_7e58_4be8_b373_6ddc3b0efb97.slice/crio-3492438d70fa7dffd467593b968240b9c55b6252e9750f3eca1b29a489d641b0 WatchSource:0}: Error finding container 3492438d70fa7dffd467593b968240b9c55b6252e9750f3eca1b29a489d641b0: Status 404 returned error can't find the container with id 3492438d70fa7dffd467593b968240b9c55b6252e9750f3eca1b29a489d641b0 Nov 24 13:38:44 crc kubenswrapper[5039]: I1124 13:38:44.895137 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 24 13:38:44 crc kubenswrapper[5039]: I1124 13:38:44.900963 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 24 13:38:44 crc kubenswrapper[5039]: I1124 13:38:44.903251 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 24 13:38:44 crc kubenswrapper[5039]: I1124 13:38:44.904013 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 24 13:38:44 crc kubenswrapper[5039]: I1124 13:38:44.904338 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-lzhgh" Nov 24 13:38:44 crc kubenswrapper[5039]: I1124 13:38:44.908791 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 24 13:38:44 crc kubenswrapper[5039]: I1124 13:38:44.950526 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 24 13:38:44 crc kubenswrapper[5039]: I1124 13:38:44.986042 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhmg6\" (UniqueName: \"kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-kube-api-access-qhmg6\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " pod="openstack/swift-storage-0" Nov 24 13:38:44 crc kubenswrapper[5039]: I1124 13:38:44.986350 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " pod="openstack/swift-storage-0" Nov 24 13:38:44 crc kubenswrapper[5039]: I1124 13:38:44.986605 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-lock\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " pod="openstack/swift-storage-0" Nov 24 13:38:44 crc kubenswrapper[5039]: I1124 13:38:44.986735 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " pod="openstack/swift-storage-0" Nov 24 13:38:44 crc kubenswrapper[5039]: I1124 13:38:44.986809 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-cache\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " pod="openstack/swift-storage-0" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.088148 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-lock\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " pod="openstack/swift-storage-0" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.088188 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " pod="openstack/swift-storage-0" Nov 24 13:38:45 crc kubenswrapper[5039]: E1124 13:38:45.088469 5039 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 24 13:38:45 crc kubenswrapper[5039]: E1124 13:38:45.088540 5039 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 24 13:38:45 crc kubenswrapper[5039]: E1124 13:38:45.088601 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift podName:ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f nodeName:}" failed. No retries permitted until 2025-11-24 13:38:45.588582157 +0000 UTC m=+1238.027706657 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift") pod "swift-storage-0" (UID: "ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f") : configmap "swift-ring-files" not found Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.088642 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-lock\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " pod="openstack/swift-storage-0" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.088702 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-cache\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " pod="openstack/swift-storage-0" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.088778 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhmg6\" (UniqueName: \"kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-kube-api-access-qhmg6\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " pod="openstack/swift-storage-0" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.088851 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " pod="openstack/swift-storage-0" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.089146 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod 
\"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/swift-storage-0" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.089175 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-cache\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " pod="openstack/swift-storage-0" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.110202 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhmg6\" (UniqueName: \"kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-kube-api-access-qhmg6\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " pod="openstack/swift-storage-0" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.112666 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " pod="openstack/swift-storage-0" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.358372 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-bl4lw"] Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.360023 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.363227 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.363570 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.363736 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.405399 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-vnskv"] Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.406781 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.419733 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-vnskv"] Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.456742 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-bl4lw"] Nov 24 13:38:45 crc kubenswrapper[5039]: E1124 13:38:45.458005 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-xvwzg ring-data-devices scripts swiftconf], unattached volumes=[], failed to process volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-xvwzg ring-data-devices scripts swiftconf]: context canceled" pod="openstack/swift-ring-rebalance-bl4lw" podUID="27053933-785a-44a7-8468-293246bd1721" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.489084 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-bl4lw"] Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.494914 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/27053933-785a-44a7-8468-293246bd1721-dispersionconf\") pod \"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.494964 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/27053933-785a-44a7-8468-293246bd1721-etc-swift\") pod \"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.494982 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/27053933-785a-44a7-8468-293246bd1721-swiftconf\") pod \"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.494998 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d7a6efff-c0ad-43c3-999c-d4840d3c5825-ring-data-devices\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.495149 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d7a6efff-c0ad-43c3-999c-d4840d3c5825-scripts\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.495168 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d7a6efff-c0ad-43c3-999c-d4840d3c5825-etc-swift\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.495221 5039 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/27053933-785a-44a7-8468-293246bd1721-ring-data-devices\") pod \"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.495245 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fmfs\" (UniqueName: \"kubernetes.io/projected/d7a6efff-c0ad-43c3-999c-d4840d3c5825-kube-api-access-2fmfs\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.495268 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d7a6efff-c0ad-43c3-999c-d4840d3c5825-dispersionconf\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.495421 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7a6efff-c0ad-43c3-999c-d4840d3c5825-combined-ca-bundle\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.495567 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvwzg\" (UniqueName: \"kubernetes.io/projected/27053933-785a-44a7-8468-293246bd1721-kube-api-access-xvwzg\") pod \"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.495626 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/27053933-785a-44a7-8468-293246bd1721-scripts\") pod \"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.495698 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d7a6efff-c0ad-43c3-999c-d4840d3c5825-swiftconf\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.495757 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27053933-785a-44a7-8468-293246bd1721-combined-ca-bundle\") pod \"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.552211 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-x8p79" event={"ID":"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97","Type":"ContainerStarted","Data":"3492438d70fa7dffd467593b968240b9c55b6252e9750f3eca1b29a489d641b0"} Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.552238 5039 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.552467 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7f896c8c65-pccxz" podUID="700d8512-7d20-4527-ac2f-16bfd6ad1010" containerName="dnsmasq-dns" containerID="cri-o://de03cc9e6f804b282b6c7a8e8ddaab61dc6a416d6440592ea13952aac1e331c0" gracePeriod=10 Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.565212 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.597603 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d7a6efff-c0ad-43c3-999c-d4840d3c5825-dispersionconf\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.597700 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7a6efff-c0ad-43c3-999c-d4840d3c5825-combined-ca-bundle\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.597758 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " pod="openstack/swift-storage-0" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.597794 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvwzg\" (UniqueName: \"kubernetes.io/projected/27053933-785a-44a7-8468-293246bd1721-kube-api-access-xvwzg\") pod \"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.597855 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/27053933-785a-44a7-8468-293246bd1721-scripts\") pod \"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.597896 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d7a6efff-c0ad-43c3-999c-d4840d3c5825-swiftconf\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.597936 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27053933-785a-44a7-8468-293246bd1721-combined-ca-bundle\") pod \"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.597969 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/27053933-785a-44a7-8468-293246bd1721-dispersionconf\") pod 
\"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.597997 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/27053933-785a-44a7-8468-293246bd1721-etc-swift\") pod \"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.598023 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/27053933-785a-44a7-8468-293246bd1721-swiftconf\") pod \"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.598046 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d7a6efff-c0ad-43c3-999c-d4840d3c5825-ring-data-devices\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.598108 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d7a6efff-c0ad-43c3-999c-d4840d3c5825-scripts\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.598132 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d7a6efff-c0ad-43c3-999c-d4840d3c5825-etc-swift\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.598163 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/27053933-785a-44a7-8468-293246bd1721-ring-data-devices\") pod \"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.598184 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fmfs\" (UniqueName: \"kubernetes.io/projected/d7a6efff-c0ad-43c3-999c-d4840d3c5825-kube-api-access-2fmfs\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.601352 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/27053933-785a-44a7-8468-293246bd1721-etc-swift\") pod \"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.602884 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d7a6efff-c0ad-43c3-999c-d4840d3c5825-dispersionconf\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 
crc kubenswrapper[5039]: E1124 13:38:45.602955 5039 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 24 13:38:45 crc kubenswrapper[5039]: E1124 13:38:45.602982 5039 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 24 13:38:45 crc kubenswrapper[5039]: E1124 13:38:45.603032 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift podName:ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f nodeName:}" failed. No retries permitted until 2025-11-24 13:38:46.60301144 +0000 UTC m=+1239.042136000 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift") pod "swift-storage-0" (UID: "ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f") : configmap "swift-ring-files" not found Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.603241 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/27053933-785a-44a7-8468-293246bd1721-dispersionconf\") pod \"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.603592 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d7a6efff-c0ad-43c3-999c-d4840d3c5825-ring-data-devices\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.603680 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/27053933-785a-44a7-8468-293246bd1721-swiftconf\") pod \"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.603687 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d7a6efff-c0ad-43c3-999c-d4840d3c5825-etc-swift\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.603949 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/27053933-785a-44a7-8468-293246bd1721-scripts\") pod \"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.604276 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d7a6efff-c0ad-43c3-999c-d4840d3c5825-scripts\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.607286 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27053933-785a-44a7-8468-293246bd1721-combined-ca-bundle\") pod \"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " 
pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.607708 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7a6efff-c0ad-43c3-999c-d4840d3c5825-combined-ca-bundle\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.609539 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d7a6efff-c0ad-43c3-999c-d4840d3c5825-swiftconf\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.619930 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fmfs\" (UniqueName: \"kubernetes.io/projected/d7a6efff-c0ad-43c3-999c-d4840d3c5825-kube-api-access-2fmfs\") pod \"swift-ring-rebalance-vnskv\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.626299 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvwzg\" (UniqueName: \"kubernetes.io/projected/27053933-785a-44a7-8468-293246bd1721-kube-api-access-xvwzg\") pod \"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.699818 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27053933-785a-44a7-8468-293246bd1721-combined-ca-bundle\") pod \"27053933-785a-44a7-8468-293246bd1721\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.699910 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/27053933-785a-44a7-8468-293246bd1721-etc-swift\") pod \"27053933-785a-44a7-8468-293246bd1721\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.699945 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xvwzg\" (UniqueName: \"kubernetes.io/projected/27053933-785a-44a7-8468-293246bd1721-kube-api-access-xvwzg\") pod \"27053933-785a-44a7-8468-293246bd1721\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.700010 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/27053933-785a-44a7-8468-293246bd1721-scripts\") pod \"27053933-785a-44a7-8468-293246bd1721\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.700069 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/27053933-785a-44a7-8468-293246bd1721-dispersionconf\") pod \"27053933-785a-44a7-8468-293246bd1721\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.700103 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: 
\"kubernetes.io/secret/27053933-785a-44a7-8468-293246bd1721-swiftconf\") pod \"27053933-785a-44a7-8468-293246bd1721\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.700308 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27053933-785a-44a7-8468-293246bd1721-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "27053933-785a-44a7-8468-293246bd1721" (UID: "27053933-785a-44a7-8468-293246bd1721"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.700491 5039 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/27053933-785a-44a7-8468-293246bd1721-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.700489 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27053933-785a-44a7-8468-293246bd1721-scripts" (OuterVolumeSpecName: "scripts") pod "27053933-785a-44a7-8468-293246bd1721" (UID: "27053933-785a-44a7-8468-293246bd1721"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.703834 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27053933-785a-44a7-8468-293246bd1721-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "27053933-785a-44a7-8468-293246bd1721" (UID: "27053933-785a-44a7-8468-293246bd1721"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.703882 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27053933-785a-44a7-8468-293246bd1721-kube-api-access-xvwzg" (OuterVolumeSpecName: "kube-api-access-xvwzg") pod "27053933-785a-44a7-8468-293246bd1721" (UID: "27053933-785a-44a7-8468-293246bd1721"). InnerVolumeSpecName "kube-api-access-xvwzg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.704092 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27053933-785a-44a7-8468-293246bd1721-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "27053933-785a-44a7-8468-293246bd1721" (UID: "27053933-785a-44a7-8468-293246bd1721"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.704867 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27053933-785a-44a7-8468-293246bd1721-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "27053933-785a-44a7-8468-293246bd1721" (UID: "27053933-785a-44a7-8468-293246bd1721"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.739928 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.740095 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/27053933-785a-44a7-8468-293246bd1721-ring-data-devices\") pod \"swift-ring-rebalance-bl4lw\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.801913 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/27053933-785a-44a7-8468-293246bd1721-ring-data-devices\") pod \"27053933-785a-44a7-8468-293246bd1721\" (UID: \"27053933-785a-44a7-8468-293246bd1721\") " Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.802488 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27053933-785a-44a7-8468-293246bd1721-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "27053933-785a-44a7-8468-293246bd1721" (UID: "27053933-785a-44a7-8468-293246bd1721"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.803003 5039 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/27053933-785a-44a7-8468-293246bd1721-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.803028 5039 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/27053933-785a-44a7-8468-293246bd1721-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.803041 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27053933-785a-44a7-8468-293246bd1721-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.803050 5039 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/27053933-785a-44a7-8468-293246bd1721-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.803059 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xvwzg\" (UniqueName: \"kubernetes.io/projected/27053933-785a-44a7-8468-293246bd1721-kube-api-access-xvwzg\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:45 crc kubenswrapper[5039]: I1124 13:38:45.803069 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/27053933-785a-44a7-8468-293246bd1721-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:46 crc kubenswrapper[5039]: I1124 13:38:46.201259 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-vnskv"] Nov 24 13:38:46 crc kubenswrapper[5039]: I1124 13:38:46.562552 5039 generic.go:334] "Generic (PLEG): container finished" podID="8bf2c80f-7e58-4be8-b373-6ddc3b0efb97" containerID="5555d5d66cf9ae3f3e72405ca6998cca456b0e969a29403b6cd98d7412cca701" exitCode=0 Nov 24 13:38:46 crc kubenswrapper[5039]: I1124 13:38:46.562813 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-x8p79" 
event={"ID":"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97","Type":"ContainerDied","Data":"5555d5d66cf9ae3f3e72405ca6998cca456b0e969a29403b6cd98d7412cca701"} Nov 24 13:38:46 crc kubenswrapper[5039]: I1124 13:38:46.565758 5039 generic.go:334] "Generic (PLEG): container finished" podID="700d8512-7d20-4527-ac2f-16bfd6ad1010" containerID="de03cc9e6f804b282b6c7a8e8ddaab61dc6a416d6440592ea13952aac1e331c0" exitCode=0 Nov 24 13:38:46 crc kubenswrapper[5039]: I1124 13:38:46.565824 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-bl4lw" Nov 24 13:38:46 crc kubenswrapper[5039]: I1124 13:38:46.565870 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-pccxz" event={"ID":"700d8512-7d20-4527-ac2f-16bfd6ad1010","Type":"ContainerDied","Data":"de03cc9e6f804b282b6c7a8e8ddaab61dc6a416d6440592ea13952aac1e331c0"} Nov 24 13:38:46 crc kubenswrapper[5039]: I1124 13:38:46.619609 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " pod="openstack/swift-storage-0" Nov 24 13:38:46 crc kubenswrapper[5039]: E1124 13:38:46.620698 5039 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 24 13:38:46 crc kubenswrapper[5039]: E1124 13:38:46.620740 5039 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 24 13:38:46 crc kubenswrapper[5039]: E1124 13:38:46.620802 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift podName:ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f nodeName:}" failed. No retries permitted until 2025-11-24 13:38:48.620780413 +0000 UTC m=+1241.059904913 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift") pod "swift-storage-0" (UID: "ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f") : configmap "swift-ring-files" not found Nov 24 13:38:46 crc kubenswrapper[5039]: I1124 13:38:46.638290 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-bl4lw"] Nov 24 13:38:46 crc kubenswrapper[5039]: I1124 13:38:46.649399 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-bl4lw"] Nov 24 13:38:48 crc kubenswrapper[5039]: I1124 13:38:48.321700 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27053933-785a-44a7-8468-293246bd1721" path="/var/lib/kubelet/pods/27053933-785a-44a7-8468-293246bd1721/volumes" Nov 24 13:38:48 crc kubenswrapper[5039]: I1124 13:38:48.664728 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " pod="openstack/swift-storage-0" Nov 24 13:38:48 crc kubenswrapper[5039]: E1124 13:38:48.665035 5039 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 24 13:38:48 crc kubenswrapper[5039]: E1124 13:38:48.665084 5039 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 24 13:38:48 crc kubenswrapper[5039]: E1124 13:38:48.665181 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift podName:ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f nodeName:}" failed. No retries permitted until 2025-11-24 13:38:52.665147906 +0000 UTC m=+1245.104272446 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift") pod "swift-storage-0" (UID: "ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f") : configmap "swift-ring-files" not found Nov 24 13:38:50 crc kubenswrapper[5039]: I1124 13:38:50.102028 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:38:50 crc kubenswrapper[5039]: I1124 13:38:50.102110 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:38:50 crc kubenswrapper[5039]: I1124 13:38:50.769413 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-vml7s" Nov 24 13:38:51 crc kubenswrapper[5039]: W1124 13:38:51.386450 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd7a6efff_c0ad_43c3_999c_d4840d3c5825.slice/crio-dab3249ef3b7c6c16d1094deb06995a7cfee3e5c9541b9b74c2edd60bc7bbf23 WatchSource:0}: Error finding container dab3249ef3b7c6c16d1094deb06995a7cfee3e5c9541b9b74c2edd60bc7bbf23: Status 404 returned error can't find the container with id dab3249ef3b7c6c16d1094deb06995a7cfee3e5c9541b9b74c2edd60bc7bbf23 Nov 24 13:38:51 crc kubenswrapper[5039]: I1124 13:38:51.524773 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-pccxz" Nov 24 13:38:51 crc kubenswrapper[5039]: I1124 13:38:51.615744 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/700d8512-7d20-4527-ac2f-16bfd6ad1010-config\") pod \"700d8512-7d20-4527-ac2f-16bfd6ad1010\" (UID: \"700d8512-7d20-4527-ac2f-16bfd6ad1010\") " Nov 24 13:38:51 crc kubenswrapper[5039]: I1124 13:38:51.615861 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5g827\" (UniqueName: \"kubernetes.io/projected/700d8512-7d20-4527-ac2f-16bfd6ad1010-kube-api-access-5g827\") pod \"700d8512-7d20-4527-ac2f-16bfd6ad1010\" (UID: \"700d8512-7d20-4527-ac2f-16bfd6ad1010\") " Nov 24 13:38:51 crc kubenswrapper[5039]: I1124 13:38:51.615997 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/700d8512-7d20-4527-ac2f-16bfd6ad1010-dns-svc\") pod \"700d8512-7d20-4527-ac2f-16bfd6ad1010\" (UID: \"700d8512-7d20-4527-ac2f-16bfd6ad1010\") " Nov 24 13:38:51 crc kubenswrapper[5039]: I1124 13:38:51.616048 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/700d8512-7d20-4527-ac2f-16bfd6ad1010-ovsdbserver-sb\") pod \"700d8512-7d20-4527-ac2f-16bfd6ad1010\" (UID: \"700d8512-7d20-4527-ac2f-16bfd6ad1010\") " Nov 24 13:38:51 crc kubenswrapper[5039]: I1124 13:38:51.619819 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/700d8512-7d20-4527-ac2f-16bfd6ad1010-kube-api-access-5g827" (OuterVolumeSpecName: "kube-api-access-5g827") pod "700d8512-7d20-4527-ac2f-16bfd6ad1010" (UID: "700d8512-7d20-4527-ac2f-16bfd6ad1010"). InnerVolumeSpecName "kube-api-access-5g827". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:38:51 crc kubenswrapper[5039]: I1124 13:38:51.627440 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-vnskv" event={"ID":"d7a6efff-c0ad-43c3-999c-d4840d3c5825","Type":"ContainerStarted","Data":"dab3249ef3b7c6c16d1094deb06995a7cfee3e5c9541b9b74c2edd60bc7bbf23"} Nov 24 13:38:51 crc kubenswrapper[5039]: I1124 13:38:51.629394 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-pccxz" event={"ID":"700d8512-7d20-4527-ac2f-16bfd6ad1010","Type":"ContainerDied","Data":"6e61e5ece10258b30f95851e8b19040814e2d1d08ccf5572d92328be7151e53b"} Nov 24 13:38:51 crc kubenswrapper[5039]: I1124 13:38:51.629434 5039 scope.go:117] "RemoveContainer" containerID="de03cc9e6f804b282b6c7a8e8ddaab61dc6a416d6440592ea13952aac1e331c0" Nov 24 13:38:51 crc kubenswrapper[5039]: I1124 13:38:51.629588 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-pccxz" Nov 24 13:38:51 crc kubenswrapper[5039]: I1124 13:38:51.700574 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/700d8512-7d20-4527-ac2f-16bfd6ad1010-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "700d8512-7d20-4527-ac2f-16bfd6ad1010" (UID: "700d8512-7d20-4527-ac2f-16bfd6ad1010"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:38:51 crc kubenswrapper[5039]: I1124 13:38:51.702365 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/700d8512-7d20-4527-ac2f-16bfd6ad1010-config" (OuterVolumeSpecName: "config") pod "700d8512-7d20-4527-ac2f-16bfd6ad1010" (UID: "700d8512-7d20-4527-ac2f-16bfd6ad1010"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:38:51 crc kubenswrapper[5039]: I1124 13:38:51.711040 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/700d8512-7d20-4527-ac2f-16bfd6ad1010-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "700d8512-7d20-4527-ac2f-16bfd6ad1010" (UID: "700d8512-7d20-4527-ac2f-16bfd6ad1010"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:38:51 crc kubenswrapper[5039]: I1124 13:38:51.718384 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/700d8512-7d20-4527-ac2f-16bfd6ad1010-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:51 crc kubenswrapper[5039]: I1124 13:38:51.718575 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5g827\" (UniqueName: \"kubernetes.io/projected/700d8512-7d20-4527-ac2f-16bfd6ad1010-kube-api-access-5g827\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:51 crc kubenswrapper[5039]: I1124 13:38:51.718655 5039 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/700d8512-7d20-4527-ac2f-16bfd6ad1010-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:51 crc kubenswrapper[5039]: I1124 13:38:51.718718 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/700d8512-7d20-4527-ac2f-16bfd6ad1010-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 13:38:51 crc kubenswrapper[5039]: I1124 13:38:51.900342 5039 scope.go:117] "RemoveContainer" containerID="864e31ec8b1decfc2ab85c7bebdfd6bec4a353454abe69e3b038ba5d5e6b9b0a" Nov 24 13:38:51 crc kubenswrapper[5039]: I1124 13:38:51.962437 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-pccxz"] Nov 24 13:38:51 crc kubenswrapper[5039]: I1124 13:38:51.969623 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-pccxz"] Nov 24 13:38:52 crc kubenswrapper[5039]: I1124 13:38:52.327715 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="700d8512-7d20-4527-ac2f-16bfd6ad1010" path="/var/lib/kubelet/pods/700d8512-7d20-4527-ac2f-16bfd6ad1010/volumes" Nov 24 13:38:52 crc kubenswrapper[5039]: I1124 13:38:52.645744 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"9523b0b0-e489-4eb8-8954-83dd766373df","Type":"ContainerStarted","Data":"92abc7fbc7b5b1880effe9f85a27288463fc001488139e8e5d334c30397584a7"} Nov 24 13:38:52 crc kubenswrapper[5039]: I1124 13:38:52.649615 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-x8p79" event={"ID":"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97","Type":"ContainerStarted","Data":"265cfdeb3221f1fbe3ac727c6c9ddc2260a0654a27e4ee87db3f1cfd7854c3dd"} Nov 24 13:38:52 crc kubenswrapper[5039]: I1124 13:38:52.649754 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-x8p79" Nov 24 13:38:52 crc kubenswrapper[5039]: I1124 
13:38:52.670659 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-x8p79" podStartSLOduration=9.670642499 podStartE2EDuration="9.670642499s" podCreationTimestamp="2025-11-24 13:38:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:38:52.666684492 +0000 UTC m=+1245.105809002" watchObservedRunningTime="2025-11-24 13:38:52.670642499 +0000 UTC m=+1245.109766999" Nov 24 13:38:52 crc kubenswrapper[5039]: I1124 13:38:52.743714 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " pod="openstack/swift-storage-0" Nov 24 13:38:52 crc kubenswrapper[5039]: E1124 13:38:52.744384 5039 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 24 13:38:52 crc kubenswrapper[5039]: E1124 13:38:52.744401 5039 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 24 13:38:52 crc kubenswrapper[5039]: E1124 13:38:52.744447 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift podName:ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f nodeName:}" failed. No retries permitted until 2025-11-24 13:39:00.74443108 +0000 UTC m=+1253.183555580 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift") pod "swift-storage-0" (UID: "ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f") : configmap "swift-ring-files" not found Nov 24 13:38:53 crc kubenswrapper[5039]: I1124 13:38:53.667884 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"9523b0b0-e489-4eb8-8954-83dd766373df","Type":"ContainerStarted","Data":"72c153ba8625628e416b0959611f3fe62add37108b048fab2f0b7211a45fddae"} Nov 24 13:38:53 crc kubenswrapper[5039]: I1124 13:38:53.668615 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 24 13:38:53 crc kubenswrapper[5039]: I1124 13:38:53.671431 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"47799b2c-4219-475d-9a09-580720622ee4","Type":"ContainerStarted","Data":"019bf5ada2966b97ed9f89b360fb4923ba460479c6a6eeff6dde749ba67d748a"} Nov 24 13:38:53 crc kubenswrapper[5039]: I1124 13:38:53.672138 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 24 13:38:53 crc kubenswrapper[5039]: I1124 13:38:53.674067 5039 generic.go:334] "Generic (PLEG): container finished" podID="1dcf47d4-1399-46bb-bda8-5dfeb96a3b60" containerID="c54a6808ce60d5b17b8c478aa932d235816ea8f6182d0f860b99c79d9cef51af" exitCode=0 Nov 24 13:38:53 crc kubenswrapper[5039]: I1124 13:38:53.674566 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60","Type":"ContainerDied","Data":"c54a6808ce60d5b17b8c478aa932d235816ea8f6182d0f860b99c79d9cef51af"} Nov 24 13:38:53 crc kubenswrapper[5039]: I1124 13:38:53.693334 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" 
podStartSLOduration=3.477237529 podStartE2EDuration="12.693318092s" podCreationTimestamp="2025-11-24 13:38:41 +0000 UTC" firstStartedPulling="2025-11-24 13:38:42.727747276 +0000 UTC m=+1235.166871776" lastFinishedPulling="2025-11-24 13:38:51.943827849 +0000 UTC m=+1244.382952339" observedRunningTime="2025-11-24 13:38:53.685738957 +0000 UTC m=+1246.124863477" watchObservedRunningTime="2025-11-24 13:38:53.693318092 +0000 UTC m=+1246.132442592" Nov 24 13:38:53 crc kubenswrapper[5039]: I1124 13:38:53.706134 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=29.907922867 podStartE2EDuration="1m20.706117124s" podCreationTimestamp="2025-11-24 13:37:33 +0000 UTC" firstStartedPulling="2025-11-24 13:38:01.801865147 +0000 UTC m=+1194.240989647" lastFinishedPulling="2025-11-24 13:38:52.600059404 +0000 UTC m=+1245.039183904" observedRunningTime="2025-11-24 13:38:53.702798413 +0000 UTC m=+1246.141922913" watchObservedRunningTime="2025-11-24 13:38:53.706117124 +0000 UTC m=+1246.145241624" Nov 24 13:38:54 crc kubenswrapper[5039]: I1124 13:38:54.686252 5039 generic.go:334] "Generic (PLEG): container finished" podID="c3dc205b-caf2-45c8-8110-d0f8be91e10f" containerID="07c368dd9f6f77959eb11530bb57f922eac7e01f2c46f82a8ce212799e5ea8e4" exitCode=0 Nov 24 13:38:54 crc kubenswrapper[5039]: I1124 13:38:54.686338 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c3dc205b-caf2-45c8-8110-d0f8be91e10f","Type":"ContainerDied","Data":"07c368dd9f6f77959eb11530bb57f922eac7e01f2c46f82a8ce212799e5ea8e4"} Nov 24 13:38:55 crc kubenswrapper[5039]: I1124 13:38:55.105064 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7f896c8c65-pccxz" podUID="700d8512-7d20-4527-ac2f-16bfd6ad1010" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.135:5353: i/o timeout" Nov 24 13:38:59 crc kubenswrapper[5039]: I1124 13:38:59.101731 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-x8p79" Nov 24 13:38:59 crc kubenswrapper[5039]: I1124 13:38:59.167097 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-vml7s"] Nov 24 13:38:59 crc kubenswrapper[5039]: I1124 13:38:59.167352 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-vml7s" podUID="ab1f8974-d270-4772-b638-019df2da6954" containerName="dnsmasq-dns" containerID="cri-o://d3b91a683fc96d83dc1eceb97781f6105280bedd756939332f7bd44be0084c7c" gracePeriod=10 Nov 24 13:38:59 crc kubenswrapper[5039]: I1124 13:38:59.739497 5039 generic.go:334] "Generic (PLEG): container finished" podID="ab1f8974-d270-4772-b638-019df2da6954" containerID="d3b91a683fc96d83dc1eceb97781f6105280bedd756939332f7bd44be0084c7c" exitCode=0 Nov 24 13:38:59 crc kubenswrapper[5039]: I1124 13:38:59.739633 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-vml7s" event={"ID":"ab1f8974-d270-4772-b638-019df2da6954","Type":"ContainerDied","Data":"d3b91a683fc96d83dc1eceb97781f6105280bedd756939332f7bd44be0084c7c"} Nov 24 13:39:00 crc kubenswrapper[5039]: I1124 13:39:00.817987 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " 
pod="openstack/swift-storage-0" Nov 24 13:39:00 crc kubenswrapper[5039]: E1124 13:39:00.818266 5039 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 24 13:39:00 crc kubenswrapper[5039]: E1124 13:39:00.818568 5039 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 24 13:39:00 crc kubenswrapper[5039]: E1124 13:39:00.818629 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift podName:ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f nodeName:}" failed. No retries permitted until 2025-11-24 13:39:16.818607489 +0000 UTC m=+1269.257731989 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift") pod "swift-storage-0" (UID: "ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f") : configmap "swift-ring-files" not found Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.420436 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-vml7s" Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.531665 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-ovsdbserver-nb\") pod \"ab1f8974-d270-4772-b638-019df2da6954\" (UID: \"ab1f8974-d270-4772-b638-019df2da6954\") " Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.531758 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-dns-svc\") pod \"ab1f8974-d270-4772-b638-019df2da6954\" (UID: \"ab1f8974-d270-4772-b638-019df2da6954\") " Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.531894 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z8r26\" (UniqueName: \"kubernetes.io/projected/ab1f8974-d270-4772-b638-019df2da6954-kube-api-access-z8r26\") pod \"ab1f8974-d270-4772-b638-019df2da6954\" (UID: \"ab1f8974-d270-4772-b638-019df2da6954\") " Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.531923 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-config\") pod \"ab1f8974-d270-4772-b638-019df2da6954\" (UID: \"ab1f8974-d270-4772-b638-019df2da6954\") " Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.532019 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-ovsdbserver-sb\") pod \"ab1f8974-d270-4772-b638-019df2da6954\" (UID: \"ab1f8974-d270-4772-b638-019df2da6954\") " Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.536156 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab1f8974-d270-4772-b638-019df2da6954-kube-api-access-z8r26" (OuterVolumeSpecName: "kube-api-access-z8r26") pod "ab1f8974-d270-4772-b638-019df2da6954" (UID: "ab1f8974-d270-4772-b638-019df2da6954"). InnerVolumeSpecName "kube-api-access-z8r26". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.577656 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ab1f8974-d270-4772-b638-019df2da6954" (UID: "ab1f8974-d270-4772-b638-019df2da6954"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.579402 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ab1f8974-d270-4772-b638-019df2da6954" (UID: "ab1f8974-d270-4772-b638-019df2da6954"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.580006 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ab1f8974-d270-4772-b638-019df2da6954" (UID: "ab1f8974-d270-4772-b638-019df2da6954"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.587725 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-config" (OuterVolumeSpecName: "config") pod "ab1f8974-d270-4772-b638-019df2da6954" (UID: "ab1f8974-d270-4772-b638-019df2da6954"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.634347 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.634577 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.634647 5039 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.634706 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z8r26\" (UniqueName: \"kubernetes.io/projected/ab1f8974-d270-4772-b638-019df2da6954-kube-api-access-z8r26\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.634782 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab1f8974-d270-4772-b638-019df2da6954-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.773096 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1dcf47d4-1399-46bb-bda8-5dfeb96a3b60","Type":"ContainerStarted","Data":"e3fbc709b3617dbf58fbf0b4ddb5158b2864b8e6a0896040904813987a44fbf0"} Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.777955 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-86db49b7ff-vml7s" event={"ID":"ab1f8974-d270-4772-b638-019df2da6954","Type":"ContainerDied","Data":"5d113230aaaab4e1ade1991bedebb226da521ab03b5d4b4df00b32c1f9b70adb"} Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.778008 5039 scope.go:117] "RemoveContainer" containerID="d3b91a683fc96d83dc1eceb97781f6105280bedd756939332f7bd44be0084c7c" Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.778127 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-vml7s" Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.781120 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-vnskv" event={"ID":"d7a6efff-c0ad-43c3-999c-d4840d3c5825","Type":"ContainerStarted","Data":"f9cba9e2fe29d928cbd7373d3257581c145bfd02b29d6acf5a55c7f50d18c2ee"} Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.784046 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"869a1d3b-808b-4a44-b300-c2fb36a07e8a","Type":"ContainerStarted","Data":"5ee3e91535ae6ff6ad47dbade6d8e98d9a12d0c46b4c42d1d355aa92031e2806"} Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.787998 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c3dc205b-caf2-45c8-8110-d0f8be91e10f","Type":"ContainerStarted","Data":"2ba1d6425ebdbf20907cb720bb667068d0fba1f0c9942f4b3a026c69b7c7bc54"} Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.799999 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=77.848772199 podStartE2EDuration="1m32.799978534s" podCreationTimestamp="2025-11-24 13:37:29 +0000 UTC" firstStartedPulling="2025-11-24 13:38:01.715238123 +0000 UTC m=+1194.154362643" lastFinishedPulling="2025-11-24 13:38:16.666444468 +0000 UTC m=+1209.105568978" observedRunningTime="2025-11-24 13:39:01.793099466 +0000 UTC m=+1254.232223966" watchObservedRunningTime="2025-11-24 13:39:01.799978534 +0000 UTC m=+1254.239103034" Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.815752 5039 scope.go:117] "RemoveContainer" containerID="a3f1ea8d0163751ad991897045a02e7e43879e23485e8d980e75b45aef827608" Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.818273 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-vml7s"] Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.824007 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-vml7s"] Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.834061 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=71.715825036 podStartE2EDuration="1m34.834044935s" podCreationTimestamp="2025-11-24 13:37:27 +0000 UTC" firstStartedPulling="2025-11-24 13:38:01.683038348 +0000 UTC m=+1194.122162848" lastFinishedPulling="2025-11-24 13:38:24.801258247 +0000 UTC m=+1217.240382747" observedRunningTime="2025-11-24 13:39:01.83177087 +0000 UTC m=+1254.270895380" watchObservedRunningTime="2025-11-24 13:39:01.834044935 +0000 UTC m=+1254.273169445" Nov 24 13:39:01 crc kubenswrapper[5039]: I1124 13:39:01.856786 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-vnskv" podStartSLOduration=6.998285958 podStartE2EDuration="16.85676796s" podCreationTimestamp="2025-11-24 13:38:45 +0000 UTC" 
firstStartedPulling="2025-11-24 13:38:51.388078658 +0000 UTC m=+1243.827203158" lastFinishedPulling="2025-11-24 13:39:01.24656066 +0000 UTC m=+1253.685685160" observedRunningTime="2025-11-24 13:39:01.8510381 +0000 UTC m=+1254.290162610" watchObservedRunningTime="2025-11-24 13:39:01.85676796 +0000 UTC m=+1254.295892460" Nov 24 13:39:02 crc kubenswrapper[5039]: I1124 13:39:02.299460 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 24 13:39:02 crc kubenswrapper[5039]: I1124 13:39:02.322372 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab1f8974-d270-4772-b638-019df2da6954" path="/var/lib/kubelet/pods/ab1f8974-d270-4772-b638-019df2da6954/volumes" Nov 24 13:39:03 crc kubenswrapper[5039]: I1124 13:39:03.759677 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 24 13:39:05 crc kubenswrapper[5039]: I1124 13:39:05.770643 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-86db49b7ff-vml7s" podUID="ab1f8974-d270-4772-b638-019df2da6954" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.137:5353: i/o timeout" Nov 24 13:39:05 crc kubenswrapper[5039]: I1124 13:39:05.824265 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"869a1d3b-808b-4a44-b300-c2fb36a07e8a","Type":"ContainerStarted","Data":"5602a6ec6e447635c28660407ff827df2f9bdb8d4080912e1541a6e2233bc34a"} Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.570126 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-5dqj9" podUID="4fc86906-5a7c-4bfe-8d23-1c98a8711a4a" containerName="ovn-controller" probeResult="failure" output=< Nov 24 13:39:08 crc kubenswrapper[5039]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 24 13:39:08 crc kubenswrapper[5039]: > Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.588745 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-2cfx8" Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.601315 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-2cfx8" Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.835851 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-5dqj9-config-tv58p"] Nov 24 13:39:08 crc kubenswrapper[5039]: E1124 13:39:08.836277 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab1f8974-d270-4772-b638-019df2da6954" containerName="dnsmasq-dns" Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.836301 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab1f8974-d270-4772-b638-019df2da6954" containerName="dnsmasq-dns" Nov 24 13:39:08 crc kubenswrapper[5039]: E1124 13:39:08.836321 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab1f8974-d270-4772-b638-019df2da6954" containerName="init" Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.836329 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab1f8974-d270-4772-b638-019df2da6954" containerName="init" Nov 24 13:39:08 crc kubenswrapper[5039]: E1124 13:39:08.836363 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="700d8512-7d20-4527-ac2f-16bfd6ad1010" containerName="init" Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.836371 5039 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="700d8512-7d20-4527-ac2f-16bfd6ad1010" containerName="init" Nov 24 13:39:08 crc kubenswrapper[5039]: E1124 13:39:08.836385 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="700d8512-7d20-4527-ac2f-16bfd6ad1010" containerName="dnsmasq-dns" Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.836393 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="700d8512-7d20-4527-ac2f-16bfd6ad1010" containerName="dnsmasq-dns" Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.838230 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="700d8512-7d20-4527-ac2f-16bfd6ad1010" containerName="dnsmasq-dns" Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.838266 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab1f8974-d270-4772-b638-019df2da6954" containerName="dnsmasq-dns" Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.839083 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.843150 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.846385 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5dqj9-config-tv58p"] Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.873153 5039 generic.go:334] "Generic (PLEG): container finished" podID="6808fd4e-3718-430c-87e8-ca3e801a8248" containerID="ea9ea3aa66fddb386be5ad5b6935a74401ac939b2d26eb0fc82ed0db374a0398" exitCode=0 Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.873239 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"6808fd4e-3718-430c-87e8-ca3e801a8248","Type":"ContainerDied","Data":"ea9ea3aa66fddb386be5ad5b6935a74401ac939b2d26eb0fc82ed0db374a0398"} Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.877374 5039 generic.go:334] "Generic (PLEG): container finished" podID="8e2e73c0-db1d-45e0-b056-0ed13bdbb904" containerID="6e40255f7df394711b21c74b237ac11944fc65febdf994a8a02b4cffa191e21c" exitCode=0 Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.877415 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"8e2e73c0-db1d-45e0-b056-0ed13bdbb904","Type":"ContainerDied","Data":"6e40255f7df394711b21c74b237ac11944fc65febdf994a8a02b4cffa191e21c"} Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.970597 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2a9d01b2-0648-4e44-94da-c66e78473756-var-run\") pod \"ovn-controller-5dqj9-config-tv58p\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.971054 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jptqb\" (UniqueName: \"kubernetes.io/projected/2a9d01b2-0648-4e44-94da-c66e78473756-kube-api-access-jptqb\") pod \"ovn-controller-5dqj9-config-tv58p\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.971100 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/2a9d01b2-0648-4e44-94da-c66e78473756-var-run-ovn\") pod \"ovn-controller-5dqj9-config-tv58p\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.971220 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2a9d01b2-0648-4e44-94da-c66e78473756-scripts\") pod \"ovn-controller-5dqj9-config-tv58p\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.971288 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2a9d01b2-0648-4e44-94da-c66e78473756-var-log-ovn\") pod \"ovn-controller-5dqj9-config-tv58p\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:08 crc kubenswrapper[5039]: I1124 13:39:08.971336 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2a9d01b2-0648-4e44-94da-c66e78473756-additional-scripts\") pod \"ovn-controller-5dqj9-config-tv58p\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:09 crc kubenswrapper[5039]: I1124 13:39:09.073680 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2a9d01b2-0648-4e44-94da-c66e78473756-var-run\") pod \"ovn-controller-5dqj9-config-tv58p\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:09 crc kubenswrapper[5039]: I1124 13:39:09.073777 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jptqb\" (UniqueName: \"kubernetes.io/projected/2a9d01b2-0648-4e44-94da-c66e78473756-kube-api-access-jptqb\") pod \"ovn-controller-5dqj9-config-tv58p\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:09 crc kubenswrapper[5039]: I1124 13:39:09.073807 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2a9d01b2-0648-4e44-94da-c66e78473756-var-run-ovn\") pod \"ovn-controller-5dqj9-config-tv58p\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:09 crc kubenswrapper[5039]: I1124 13:39:09.073871 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2a9d01b2-0648-4e44-94da-c66e78473756-scripts\") pod \"ovn-controller-5dqj9-config-tv58p\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:09 crc kubenswrapper[5039]: I1124 13:39:09.073914 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2a9d01b2-0648-4e44-94da-c66e78473756-var-log-ovn\") pod \"ovn-controller-5dqj9-config-tv58p\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:09 crc kubenswrapper[5039]: I1124 13:39:09.073939 5039 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2a9d01b2-0648-4e44-94da-c66e78473756-additional-scripts\") pod \"ovn-controller-5dqj9-config-tv58p\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:09 crc kubenswrapper[5039]: I1124 13:39:09.074796 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2a9d01b2-0648-4e44-94da-c66e78473756-additional-scripts\") pod \"ovn-controller-5dqj9-config-tv58p\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:09 crc kubenswrapper[5039]: I1124 13:39:09.075093 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2a9d01b2-0648-4e44-94da-c66e78473756-var-run\") pod \"ovn-controller-5dqj9-config-tv58p\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:09 crc kubenswrapper[5039]: I1124 13:39:09.075660 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2a9d01b2-0648-4e44-94da-c66e78473756-var-run-ovn\") pod \"ovn-controller-5dqj9-config-tv58p\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:09 crc kubenswrapper[5039]: I1124 13:39:09.078140 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2a9d01b2-0648-4e44-94da-c66e78473756-scripts\") pod \"ovn-controller-5dqj9-config-tv58p\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:09 crc kubenswrapper[5039]: I1124 13:39:09.078276 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2a9d01b2-0648-4e44-94da-c66e78473756-var-log-ovn\") pod \"ovn-controller-5dqj9-config-tv58p\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:09 crc kubenswrapper[5039]: I1124 13:39:09.091475 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jptqb\" (UniqueName: \"kubernetes.io/projected/2a9d01b2-0648-4e44-94da-c66e78473756-kube-api-access-jptqb\") pod \"ovn-controller-5dqj9-config-tv58p\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:09 crc kubenswrapper[5039]: I1124 13:39:09.165952 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:09.599753 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:09.600157 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:09.622361 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5dqj9-config-tv58p"] Nov 24 13:39:14 crc kubenswrapper[5039]: W1124 13:39:09.964785 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a9d01b2_0648_4e44_94da_c66e78473756.slice/crio-9a1726c344d8f2bca17480873a9560a5b4ff73a3e0fb3a04677576d391108c97 WatchSource:0}: Error finding container 9a1726c344d8f2bca17480873a9560a5b4ff73a3e0fb3a04677576d391108c97: Status 404 returned error can't find the container with id 9a1726c344d8f2bca17480873a9560a5b4ff73a3e0fb3a04677576d391108c97 Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:10.852213 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:10.852279 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:10.899684 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5dqj9-config-tv58p" event={"ID":"2a9d01b2-0648-4e44-94da-c66e78473756","Type":"ContainerStarted","Data":"9a1726c344d8f2bca17480873a9560a5b4ff73a3e0fb3a04677576d391108c97"} Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:11.908679 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5dqj9-config-tv58p" event={"ID":"2a9d01b2-0648-4e44-94da-c66e78473756","Type":"ContainerStarted","Data":"f7f255a5b5ef739628a977bd9d4f3b8aff132ae39584d5944ef9176a0bc70c5e"} Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:11.910826 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"8e2e73c0-db1d-45e0-b056-0ed13bdbb904","Type":"ContainerStarted","Data":"3ee43750eeed8c71cd0a28ca11d7eb172974b9ffbe34600ac7ee2219070b935b"} Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:11.912472 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"6808fd4e-3718-430c-87e8-ca3e801a8248","Type":"ContainerStarted","Data":"56d5a0d96539de067af8cdca011d9daf93150bc8e470750cbef39cb456bf330d"} Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:12.923057 5039 generic.go:334] "Generic (PLEG): container finished" podID="2a9d01b2-0648-4e44-94da-c66e78473756" containerID="f7f255a5b5ef739628a977bd9d4f3b8aff132ae39584d5944ef9176a0bc70c5e" exitCode=0 Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:12.924714 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5dqj9-config-tv58p" event={"ID":"2a9d01b2-0648-4e44-94da-c66e78473756","Type":"ContainerDied","Data":"f7f255a5b5ef739628a977bd9d4f3b8aff132ae39584d5944ef9176a0bc70c5e"} Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:12.925256 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:12.925289 5039 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:12.959281 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=43.362307544 podStartE2EDuration="1m46.959259238s" podCreationTimestamp="2025-11-24 13:37:26 +0000 UTC" firstStartedPulling="2025-11-24 13:37:28.196698892 +0000 UTC m=+1160.635823392" lastFinishedPulling="2025-11-24 13:38:31.793650546 +0000 UTC m=+1224.232775086" observedRunningTime="2025-11-24 13:39:12.953788535 +0000 UTC m=+1265.392913055" watchObservedRunningTime="2025-11-24 13:39:12.959259238 +0000 UTC m=+1265.398383738" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:12.984398 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=43.432869556 podStartE2EDuration="1m46.984377472s" podCreationTimestamp="2025-11-24 13:37:26 +0000 UTC" firstStartedPulling="2025-11-24 13:37:28.501280231 +0000 UTC m=+1160.940404721" lastFinishedPulling="2025-11-24 13:38:32.052788107 +0000 UTC m=+1224.491912637" observedRunningTime="2025-11-24 13:39:12.983350986 +0000 UTC m=+1265.422475486" watchObservedRunningTime="2025-11-24 13:39:12.984377472 +0000 UTC m=+1265.423501972" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:13.565782 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-5dqj9" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.205207 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.296027 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="c3dc205b-caf2-45c8-8110-d0f8be91e10f" containerName="galera" probeResult="failure" output=< Nov 24 13:39:14 crc kubenswrapper[5039]: wsrep_local_state_comment (Joined) differs from Synced Nov 24 13:39:14 crc kubenswrapper[5039]: > Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.799015 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.891796 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2a9d01b2-0648-4e44-94da-c66e78473756-scripts\") pod \"2a9d01b2-0648-4e44-94da-c66e78473756\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.891858 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2a9d01b2-0648-4e44-94da-c66e78473756-additional-scripts\") pod \"2a9d01b2-0648-4e44-94da-c66e78473756\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.891895 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2a9d01b2-0648-4e44-94da-c66e78473756-var-run-ovn\") pod \"2a9d01b2-0648-4e44-94da-c66e78473756\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.891931 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2a9d01b2-0648-4e44-94da-c66e78473756-var-log-ovn\") pod \"2a9d01b2-0648-4e44-94da-c66e78473756\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.891998 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2a9d01b2-0648-4e44-94da-c66e78473756-var-run\") pod \"2a9d01b2-0648-4e44-94da-c66e78473756\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.892045 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jptqb\" (UniqueName: \"kubernetes.io/projected/2a9d01b2-0648-4e44-94da-c66e78473756-kube-api-access-jptqb\") pod \"2a9d01b2-0648-4e44-94da-c66e78473756\" (UID: \"2a9d01b2-0648-4e44-94da-c66e78473756\") " Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.892223 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2a9d01b2-0648-4e44-94da-c66e78473756-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "2a9d01b2-0648-4e44-94da-c66e78473756" (UID: "2a9d01b2-0648-4e44-94da-c66e78473756"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.892246 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2a9d01b2-0648-4e44-94da-c66e78473756-var-run" (OuterVolumeSpecName: "var-run") pod "2a9d01b2-0648-4e44-94da-c66e78473756" (UID: "2a9d01b2-0648-4e44-94da-c66e78473756"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.892268 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2a9d01b2-0648-4e44-94da-c66e78473756-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "2a9d01b2-0648-4e44-94da-c66e78473756" (UID: "2a9d01b2-0648-4e44-94da-c66e78473756"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.892709 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a9d01b2-0648-4e44-94da-c66e78473756-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "2a9d01b2-0648-4e44-94da-c66e78473756" (UID: "2a9d01b2-0648-4e44-94da-c66e78473756"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.892920 5039 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2a9d01b2-0648-4e44-94da-c66e78473756-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.892997 5039 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2a9d01b2-0648-4e44-94da-c66e78473756-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.892964 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a9d01b2-0648-4e44-94da-c66e78473756-scripts" (OuterVolumeSpecName: "scripts") pod "2a9d01b2-0648-4e44-94da-c66e78473756" (UID: "2a9d01b2-0648-4e44-94da-c66e78473756"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.893067 5039 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2a9d01b2-0648-4e44-94da-c66e78473756-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.893130 5039 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2a9d01b2-0648-4e44-94da-c66e78473756-var-run\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.914171 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a9d01b2-0648-4e44-94da-c66e78473756-kube-api-access-jptqb" (OuterVolumeSpecName: "kube-api-access-jptqb") pod "2a9d01b2-0648-4e44-94da-c66e78473756" (UID: "2a9d01b2-0648-4e44-94da-c66e78473756"). InnerVolumeSpecName "kube-api-access-jptqb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.940909 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5dqj9-config-tv58p" event={"ID":"2a9d01b2-0648-4e44-94da-c66e78473756","Type":"ContainerDied","Data":"9a1726c344d8f2bca17480873a9560a5b4ff73a3e0fb3a04677576d391108c97"} Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.940997 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9a1726c344d8f2bca17480873a9560a5b4ff73a3e0fb3a04677576d391108c97" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.940938 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-5dqj9-config-tv58p" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.943590 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"869a1d3b-808b-4a44-b300-c2fb36a07e8a","Type":"ContainerStarted","Data":"2b0da0cd1053cb07ac6a8370d2c07235c2d94001bf1c2acbc7c295f2a6252852"} Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.982707 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=29.333050747 podStartE2EDuration="1m41.982680879s" podCreationTimestamp="2025-11-24 13:37:33 +0000 UTC" firstStartedPulling="2025-11-24 13:38:01.815502749 +0000 UTC m=+1194.254627269" lastFinishedPulling="2025-11-24 13:39:14.465132901 +0000 UTC m=+1266.904257401" observedRunningTime="2025-11-24 13:39:14.962344123 +0000 UTC m=+1267.401468623" watchObservedRunningTime="2025-11-24 13:39:14.982680879 +0000 UTC m=+1267.421805379" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.994699 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jptqb\" (UniqueName: \"kubernetes.io/projected/2a9d01b2-0648-4e44-94da-c66e78473756-kube-api-access-jptqb\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:14 crc kubenswrapper[5039]: I1124 13:39:14.994743 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2a9d01b2-0648-4e44-94da-c66e78473756-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:15 crc kubenswrapper[5039]: I1124 13:39:15.044012 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:15 crc kubenswrapper[5039]: I1124 13:39:15.930592 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-5dqj9-config-tv58p"] Nov 24 13:39:15 crc kubenswrapper[5039]: I1124 13:39:15.939972 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-5dqj9-config-tv58p"] Nov 24 13:39:15 crc kubenswrapper[5039]: I1124 13:39:15.952855 5039 generic.go:334] "Generic (PLEG): container finished" podID="d7a6efff-c0ad-43c3-999c-d4840d3c5825" containerID="f9cba9e2fe29d928cbd7373d3257581c145bfd02b29d6acf5a55c7f50d18c2ee" exitCode=0 Nov 24 13:39:15 crc kubenswrapper[5039]: I1124 13:39:15.952956 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-vnskv" event={"ID":"d7a6efff-c0ad-43c3-999c-d4840d3c5825","Type":"ContainerDied","Data":"f9cba9e2fe29d928cbd7373d3257581c145bfd02b29d6acf5a55c7f50d18c2ee"} Nov 24 13:39:16 crc kubenswrapper[5039]: I1124 13:39:16.320323 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a9d01b2-0648-4e44-94da-c66e78473756" path="/var/lib/kubelet/pods/2a9d01b2-0648-4e44-94da-c66e78473756/volumes" Nov 24 13:39:16 crc kubenswrapper[5039]: I1124 13:39:16.824941 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " pod="openstack/swift-storage-0" Nov 24 13:39:16 crc kubenswrapper[5039]: I1124 13:39:16.830956 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f-etc-swift\") pod \"swift-storage-0\" (UID: \"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f\") " 
pod="openstack/swift-storage-0" Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.017719 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.222908 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.354196 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="1dcf47d4-1399-46bb-bda8-5dfeb96a3b60" containerName="galera" probeResult="failure" output=< Nov 24 13:39:17 crc kubenswrapper[5039]: wsrep_local_state_comment (Joined) differs from Synced Nov 24 13:39:17 crc kubenswrapper[5039]: > Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.366975 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.436467 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d7a6efff-c0ad-43c3-999c-d4840d3c5825-scripts\") pod \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.436530 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d7a6efff-c0ad-43c3-999c-d4840d3c5825-swiftconf\") pod \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.436813 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d7a6efff-c0ad-43c3-999c-d4840d3c5825-etc-swift\") pod \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.436905 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7a6efff-c0ad-43c3-999c-d4840d3c5825-combined-ca-bundle\") pod \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.436941 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d7a6efff-c0ad-43c3-999c-d4840d3c5825-dispersionconf\") pod \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.437053 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d7a6efff-c0ad-43c3-999c-d4840d3c5825-ring-data-devices\") pod \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.437092 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fmfs\" (UniqueName: \"kubernetes.io/projected/d7a6efff-c0ad-43c3-999c-d4840d3c5825-kube-api-access-2fmfs\") pod \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\" (UID: \"d7a6efff-c0ad-43c3-999c-d4840d3c5825\") " Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.437764 5039 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/empty-dir/d7a6efff-c0ad-43c3-999c-d4840d3c5825-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "d7a6efff-c0ad-43c3-999c-d4840d3c5825" (UID: "d7a6efff-c0ad-43c3-999c-d4840d3c5825"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.437983 5039 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d7a6efff-c0ad-43c3-999c-d4840d3c5825-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.439001 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7a6efff-c0ad-43c3-999c-d4840d3c5825-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "d7a6efff-c0ad-43c3-999c-d4840d3c5825" (UID: "d7a6efff-c0ad-43c3-999c-d4840d3c5825"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.444426 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7a6efff-c0ad-43c3-999c-d4840d3c5825-kube-api-access-2fmfs" (OuterVolumeSpecName: "kube-api-access-2fmfs") pod "d7a6efff-c0ad-43c3-999c-d4840d3c5825" (UID: "d7a6efff-c0ad-43c3-999c-d4840d3c5825"). InnerVolumeSpecName "kube-api-access-2fmfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.445096 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7a6efff-c0ad-43c3-999c-d4840d3c5825-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "d7a6efff-c0ad-43c3-999c-d4840d3c5825" (UID: "d7a6efff-c0ad-43c3-999c-d4840d3c5825"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.469949 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7a6efff-c0ad-43c3-999c-d4840d3c5825-scripts" (OuterVolumeSpecName: "scripts") pod "d7a6efff-c0ad-43c3-999c-d4840d3c5825" (UID: "d7a6efff-c0ad-43c3-999c-d4840d3c5825"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.470675 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7a6efff-c0ad-43c3-999c-d4840d3c5825-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "d7a6efff-c0ad-43c3-999c-d4840d3c5825" (UID: "d7a6efff-c0ad-43c3-999c-d4840d3c5825"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.476671 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7a6efff-c0ad-43c3-999c-d4840d3c5825-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d7a6efff-c0ad-43c3-999c-d4840d3c5825" (UID: "d7a6efff-c0ad-43c3-999c-d4840d3c5825"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.539374 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7a6efff-c0ad-43c3-999c-d4840d3c5825-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.539422 5039 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d7a6efff-c0ad-43c3-999c-d4840d3c5825-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.539436 5039 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d7a6efff-c0ad-43c3-999c-d4840d3c5825-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.539448 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2fmfs\" (UniqueName: \"kubernetes.io/projected/d7a6efff-c0ad-43c3-999c-d4840d3c5825-kube-api-access-2fmfs\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.539463 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d7a6efff-c0ad-43c3-999c-d4840d3c5825-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.539476 5039 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d7a6efff-c0ad-43c3-999c-d4840d3c5825-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:17 crc kubenswrapper[5039]: W1124 13:39:17.680107 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac0d32e6_ff0b_4d8e_9094_b0edcc49cc8f.slice/crio-f5bf40c261d3c9c295e11ec608d08e8302fa651a6edcf787e68faf0e86cb3657 WatchSource:0}: Error finding container f5bf40c261d3c9c295e11ec608d08e8302fa651a6edcf787e68faf0e86cb3657: Status 404 returned error can't find the container with id f5bf40c261d3c9c295e11ec608d08e8302fa651a6edcf787e68faf0e86cb3657 Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.680726 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.970110 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-vnskv" event={"ID":"d7a6efff-c0ad-43c3-999c-d4840d3c5825","Type":"ContainerDied","Data":"dab3249ef3b7c6c16d1094deb06995a7cfee3e5c9541b9b74c2edd60bc7bbf23"} Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.970147 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dab3249ef3b7c6c16d1094deb06995a7cfee3e5c9541b9b74c2edd60bc7bbf23" Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.970174 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-vnskv" Nov 24 13:39:17 crc kubenswrapper[5039]: I1124 13:39:17.971487 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f","Type":"ContainerStarted","Data":"f5bf40c261d3c9c295e11ec608d08e8302fa651a6edcf787e68faf0e86cb3657"} Nov 24 13:39:19 crc kubenswrapper[5039]: I1124 13:39:19.673694 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 24 13:39:19 crc kubenswrapper[5039]: I1124 13:39:19.992158 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f","Type":"ContainerStarted","Data":"2ff2dcff2ccad0b9256967cbb99943005abcf5eb111cbc1665b77151f22796ee"} Nov 24 13:39:19 crc kubenswrapper[5039]: I1124 13:39:19.992202 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f","Type":"ContainerStarted","Data":"ecf4ae55c1d34bc141e22955d2bc68cf47c745cf0a231422d3b64a2a2040029b"} Nov 24 13:39:20 crc kubenswrapper[5039]: I1124 13:39:20.043804 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:20 crc kubenswrapper[5039]: I1124 13:39:20.047146 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:20 crc kubenswrapper[5039]: I1124 13:39:20.101967 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:39:20 crc kubenswrapper[5039]: I1124 13:39:20.102035 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:39:20 crc kubenswrapper[5039]: I1124 13:39:20.825798 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-26ef-account-create-jtkcn"] Nov 24 13:39:20 crc kubenswrapper[5039]: E1124 13:39:20.826649 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a9d01b2-0648-4e44-94da-c66e78473756" containerName="ovn-config" Nov 24 13:39:20 crc kubenswrapper[5039]: I1124 13:39:20.826667 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a9d01b2-0648-4e44-94da-c66e78473756" containerName="ovn-config" Nov 24 13:39:20 crc kubenswrapper[5039]: E1124 13:39:20.826696 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7a6efff-c0ad-43c3-999c-d4840d3c5825" containerName="swift-ring-rebalance" Nov 24 13:39:20 crc kubenswrapper[5039]: I1124 13:39:20.826705 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7a6efff-c0ad-43c3-999c-d4840d3c5825" containerName="swift-ring-rebalance" Nov 24 13:39:20 crc kubenswrapper[5039]: I1124 13:39:20.826916 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7a6efff-c0ad-43c3-999c-d4840d3c5825" containerName="swift-ring-rebalance" Nov 24 13:39:20 crc kubenswrapper[5039]: I1124 13:39:20.826942 5039 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="2a9d01b2-0648-4e44-94da-c66e78473756" containerName="ovn-config" Nov 24 13:39:20 crc kubenswrapper[5039]: I1124 13:39:20.827952 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-26ef-account-create-jtkcn" Nov 24 13:39:20 crc kubenswrapper[5039]: I1124 13:39:20.834970 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-8xdmb"] Nov 24 13:39:20 crc kubenswrapper[5039]: I1124 13:39:20.834982 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 24 13:39:20 crc kubenswrapper[5039]: I1124 13:39:20.836412 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-8xdmb" Nov 24 13:39:20 crc kubenswrapper[5039]: I1124 13:39:20.853168 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-8xdmb"] Nov 24 13:39:20 crc kubenswrapper[5039]: I1124 13:39:20.868644 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-26ef-account-create-jtkcn"] Nov 24 13:39:20 crc kubenswrapper[5039]: I1124 13:39:20.906106 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d87b7ee-b5fa-442b-bf05-eaf35c945ca4-operator-scripts\") pod \"keystone-26ef-account-create-jtkcn\" (UID: \"3d87b7ee-b5fa-442b-bf05-eaf35c945ca4\") " pod="openstack/keystone-26ef-account-create-jtkcn" Nov 24 13:39:20 crc kubenswrapper[5039]: I1124 13:39:20.906268 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c87e221f-b63d-4883-a80b-084a56305cb1-operator-scripts\") pod \"keystone-db-create-8xdmb\" (UID: \"c87e221f-b63d-4883-a80b-084a56305cb1\") " pod="openstack/keystone-db-create-8xdmb" Nov 24 13:39:20 crc kubenswrapper[5039]: I1124 13:39:20.906331 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgrtc\" (UniqueName: \"kubernetes.io/projected/c87e221f-b63d-4883-a80b-084a56305cb1-kube-api-access-kgrtc\") pod \"keystone-db-create-8xdmb\" (UID: \"c87e221f-b63d-4883-a80b-084a56305cb1\") " pod="openstack/keystone-db-create-8xdmb" Nov 24 13:39:20 crc kubenswrapper[5039]: I1124 13:39:20.906538 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdtsw\" (UniqueName: \"kubernetes.io/projected/3d87b7ee-b5fa-442b-bf05-eaf35c945ca4-kube-api-access-fdtsw\") pod \"keystone-26ef-account-create-jtkcn\" (UID: \"3d87b7ee-b5fa-442b-bf05-eaf35c945ca4\") " pod="openstack/keystone-26ef-account-create-jtkcn" Nov 24 13:39:20 crc kubenswrapper[5039]: I1124 13:39:20.965642 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.004061 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f","Type":"ContainerStarted","Data":"2e9349f5c24b1834b050f45d0b6954bdbb6173c18457853be00644c7c4693c80"} Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.004110 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f","Type":"ContainerStarted","Data":"37d330368273eb395f92091dede5c2810ae40ed84b2fce4f078d429045dcaf2b"} Nov 
24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.006016 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.008027 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdtsw\" (UniqueName: \"kubernetes.io/projected/3d87b7ee-b5fa-442b-bf05-eaf35c945ca4-kube-api-access-fdtsw\") pod \"keystone-26ef-account-create-jtkcn\" (UID: \"3d87b7ee-b5fa-442b-bf05-eaf35c945ca4\") " pod="openstack/keystone-26ef-account-create-jtkcn" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.008089 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d87b7ee-b5fa-442b-bf05-eaf35c945ca4-operator-scripts\") pod \"keystone-26ef-account-create-jtkcn\" (UID: \"3d87b7ee-b5fa-442b-bf05-eaf35c945ca4\") " pod="openstack/keystone-26ef-account-create-jtkcn" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.008187 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c87e221f-b63d-4883-a80b-084a56305cb1-operator-scripts\") pod \"keystone-db-create-8xdmb\" (UID: \"c87e221f-b63d-4883-a80b-084a56305cb1\") " pod="openstack/keystone-db-create-8xdmb" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.008228 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgrtc\" (UniqueName: \"kubernetes.io/projected/c87e221f-b63d-4883-a80b-084a56305cb1-kube-api-access-kgrtc\") pod \"keystone-db-create-8xdmb\" (UID: \"c87e221f-b63d-4883-a80b-084a56305cb1\") " pod="openstack/keystone-db-create-8xdmb" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.009111 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d87b7ee-b5fa-442b-bf05-eaf35c945ca4-operator-scripts\") pod \"keystone-26ef-account-create-jtkcn\" (UID: \"3d87b7ee-b5fa-442b-bf05-eaf35c945ca4\") " pod="openstack/keystone-26ef-account-create-jtkcn" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.009126 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c87e221f-b63d-4883-a80b-084a56305cb1-operator-scripts\") pod \"keystone-db-create-8xdmb\" (UID: \"c87e221f-b63d-4883-a80b-084a56305cb1\") " pod="openstack/keystone-db-create-8xdmb" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.018773 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-n9c94"] Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.020274 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-n9c94" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.037052 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdtsw\" (UniqueName: \"kubernetes.io/projected/3d87b7ee-b5fa-442b-bf05-eaf35c945ca4-kube-api-access-fdtsw\") pod \"keystone-26ef-account-create-jtkcn\" (UID: \"3d87b7ee-b5fa-442b-bf05-eaf35c945ca4\") " pod="openstack/keystone-26ef-account-create-jtkcn" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.068978 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-n9c94"] Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.069160 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgrtc\" (UniqueName: \"kubernetes.io/projected/c87e221f-b63d-4883-a80b-084a56305cb1-kube-api-access-kgrtc\") pod \"keystone-db-create-8xdmb\" (UID: \"c87e221f-b63d-4883-a80b-084a56305cb1\") " pod="openstack/keystone-db-create-8xdmb" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.118242 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdmdb\" (UniqueName: \"kubernetes.io/projected/cd84bf91-50f0-43ff-a40d-7973e2e54a0b-kube-api-access-cdmdb\") pod \"placement-db-create-n9c94\" (UID: \"cd84bf91-50f0-43ff-a40d-7973e2e54a0b\") " pod="openstack/placement-db-create-n9c94" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.118476 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd84bf91-50f0-43ff-a40d-7973e2e54a0b-operator-scripts\") pod \"placement-db-create-n9c94\" (UID: \"cd84bf91-50f0-43ff-a40d-7973e2e54a0b\") " pod="openstack/placement-db-create-n9c94" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.159915 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-683c-account-create-cddj7"] Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.160462 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-26ef-account-create-jtkcn" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.161045 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-683c-account-create-cddj7" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.163290 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.170142 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-683c-account-create-cddj7"] Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.171583 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-8xdmb" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.221041 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd84bf91-50f0-43ff-a40d-7973e2e54a0b-operator-scripts\") pod \"placement-db-create-n9c94\" (UID: \"cd84bf91-50f0-43ff-a40d-7973e2e54a0b\") " pod="openstack/placement-db-create-n9c94" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.221188 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cfcfc56f-8643-4195-8d18-b3076deac9d4-operator-scripts\") pod \"placement-683c-account-create-cddj7\" (UID: \"cfcfc56f-8643-4195-8d18-b3076deac9d4\") " pod="openstack/placement-683c-account-create-cddj7" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.221423 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pxlj\" (UniqueName: \"kubernetes.io/projected/cfcfc56f-8643-4195-8d18-b3076deac9d4-kube-api-access-9pxlj\") pod \"placement-683c-account-create-cddj7\" (UID: \"cfcfc56f-8643-4195-8d18-b3076deac9d4\") " pod="openstack/placement-683c-account-create-cddj7" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.221467 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdmdb\" (UniqueName: \"kubernetes.io/projected/cd84bf91-50f0-43ff-a40d-7973e2e54a0b-kube-api-access-cdmdb\") pod \"placement-db-create-n9c94\" (UID: \"cd84bf91-50f0-43ff-a40d-7973e2e54a0b\") " pod="openstack/placement-db-create-n9c94" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.222684 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd84bf91-50f0-43ff-a40d-7973e2e54a0b-operator-scripts\") pod \"placement-db-create-n9c94\" (UID: \"cd84bf91-50f0-43ff-a40d-7973e2e54a0b\") " pod="openstack/placement-db-create-n9c94" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.268581 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdmdb\" (UniqueName: \"kubernetes.io/projected/cd84bf91-50f0-43ff-a40d-7973e2e54a0b-kube-api-access-cdmdb\") pod \"placement-db-create-n9c94\" (UID: \"cd84bf91-50f0-43ff-a40d-7973e2e54a0b\") " pod="openstack/placement-db-create-n9c94" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.323011 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cfcfc56f-8643-4195-8d18-b3076deac9d4-operator-scripts\") pod \"placement-683c-account-create-cddj7\" (UID: \"cfcfc56f-8643-4195-8d18-b3076deac9d4\") " pod="openstack/placement-683c-account-create-cddj7" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.323080 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pxlj\" (UniqueName: \"kubernetes.io/projected/cfcfc56f-8643-4195-8d18-b3076deac9d4-kube-api-access-9pxlj\") pod \"placement-683c-account-create-cddj7\" (UID: \"cfcfc56f-8643-4195-8d18-b3076deac9d4\") " pod="openstack/placement-683c-account-create-cddj7" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.324743 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/cfcfc56f-8643-4195-8d18-b3076deac9d4-operator-scripts\") pod \"placement-683c-account-create-cddj7\" (UID: \"cfcfc56f-8643-4195-8d18-b3076deac9d4\") " pod="openstack/placement-683c-account-create-cddj7" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.343109 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-n9c94" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.353810 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pxlj\" (UniqueName: \"kubernetes.io/projected/cfcfc56f-8643-4195-8d18-b3076deac9d4-kube-api-access-9pxlj\") pod \"placement-683c-account-create-cddj7\" (UID: \"cfcfc56f-8643-4195-8d18-b3076deac9d4\") " pod="openstack/placement-683c-account-create-cddj7" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.391359 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-l5gbh"] Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.394172 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-l5gbh" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.425363 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-l5gbh"] Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.425670 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39bc167d-fd5d-4522-8350-8a59cd32aced-operator-scripts\") pod \"glance-db-create-l5gbh\" (UID: \"39bc167d-fd5d-4522-8350-8a59cd32aced\") " pod="openstack/glance-db-create-l5gbh" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.425741 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9z2d\" (UniqueName: \"kubernetes.io/projected/39bc167d-fd5d-4522-8350-8a59cd32aced-kube-api-access-k9z2d\") pod \"glance-db-create-l5gbh\" (UID: \"39bc167d-fd5d-4522-8350-8a59cd32aced\") " pod="openstack/glance-db-create-l5gbh" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.461245 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-ddce-account-create-s8b2c"] Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.468947 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-ddce-account-create-s8b2c"] Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.469066 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-ddce-account-create-s8b2c" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.472283 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.484747 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-683c-account-create-cddj7" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.527903 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39bc167d-fd5d-4522-8350-8a59cd32aced-operator-scripts\") pod \"glance-db-create-l5gbh\" (UID: \"39bc167d-fd5d-4522-8350-8a59cd32aced\") " pod="openstack/glance-db-create-l5gbh" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.528029 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9z2d\" (UniqueName: \"kubernetes.io/projected/39bc167d-fd5d-4522-8350-8a59cd32aced-kube-api-access-k9z2d\") pod \"glance-db-create-l5gbh\" (UID: \"39bc167d-fd5d-4522-8350-8a59cd32aced\") " pod="openstack/glance-db-create-l5gbh" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.528116 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8721aea-88f2-4436-9334-b3b85b3b08ed-operator-scripts\") pod \"glance-ddce-account-create-s8b2c\" (UID: \"d8721aea-88f2-4436-9334-b3b85b3b08ed\") " pod="openstack/glance-ddce-account-create-s8b2c" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.528149 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-99qcb\" (UniqueName: \"kubernetes.io/projected/d8721aea-88f2-4436-9334-b3b85b3b08ed-kube-api-access-99qcb\") pod \"glance-ddce-account-create-s8b2c\" (UID: \"d8721aea-88f2-4436-9334-b3b85b3b08ed\") " pod="openstack/glance-ddce-account-create-s8b2c" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.528875 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39bc167d-fd5d-4522-8350-8a59cd32aced-operator-scripts\") pod \"glance-db-create-l5gbh\" (UID: \"39bc167d-fd5d-4522-8350-8a59cd32aced\") " pod="openstack/glance-db-create-l5gbh" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.551135 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9z2d\" (UniqueName: \"kubernetes.io/projected/39bc167d-fd5d-4522-8350-8a59cd32aced-kube-api-access-k9z2d\") pod \"glance-db-create-l5gbh\" (UID: \"39bc167d-fd5d-4522-8350-8a59cd32aced\") " pod="openstack/glance-db-create-l5gbh" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.629877 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8721aea-88f2-4436-9334-b3b85b3b08ed-operator-scripts\") pod \"glance-ddce-account-create-s8b2c\" (UID: \"d8721aea-88f2-4436-9334-b3b85b3b08ed\") " pod="openstack/glance-ddce-account-create-s8b2c" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.629971 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-99qcb\" (UniqueName: \"kubernetes.io/projected/d8721aea-88f2-4436-9334-b3b85b3b08ed-kube-api-access-99qcb\") pod \"glance-ddce-account-create-s8b2c\" (UID: \"d8721aea-88f2-4436-9334-b3b85b3b08ed\") " pod="openstack/glance-ddce-account-create-s8b2c" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.631384 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8721aea-88f2-4436-9334-b3b85b3b08ed-operator-scripts\") pod 
\"glance-ddce-account-create-s8b2c\" (UID: \"d8721aea-88f2-4436-9334-b3b85b3b08ed\") " pod="openstack/glance-ddce-account-create-s8b2c" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.652077 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-99qcb\" (UniqueName: \"kubernetes.io/projected/d8721aea-88f2-4436-9334-b3b85b3b08ed-kube-api-access-99qcb\") pod \"glance-ddce-account-create-s8b2c\" (UID: \"d8721aea-88f2-4436-9334-b3b85b3b08ed\") " pod="openstack/glance-ddce-account-create-s8b2c" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.743877 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-l5gbh" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.760103 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-8xdmb"] Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.768113 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-26ef-account-create-jtkcn"] Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.796405 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-ddce-account-create-s8b2c" Nov 24 13:39:21 crc kubenswrapper[5039]: I1124 13:39:21.945185 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-n9c94"] Nov 24 13:39:22 crc kubenswrapper[5039]: I1124 13:39:22.049058 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-683c-account-create-cddj7"] Nov 24 13:39:22 crc kubenswrapper[5039]: W1124 13:39:22.065625 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d87b7ee_b5fa_442b_bf05_eaf35c945ca4.slice/crio-aec337ed8185093a4bca7b491ee473a5e9fb2ee1309305ac0fe9c425b57e229e WatchSource:0}: Error finding container aec337ed8185093a4bca7b491ee473a5e9fb2ee1309305ac0fe9c425b57e229e: Status 404 returned error can't find the container with id aec337ed8185093a4bca7b491ee473a5e9fb2ee1309305ac0fe9c425b57e229e Nov 24 13:39:22 crc kubenswrapper[5039]: W1124 13:39:22.077273 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcfcfc56f_8643_4195_8d18_b3076deac9d4.slice/crio-c017909d00c76bf342065ca6df88166135be882e8427e6c12d465a6a216e0c1c WatchSource:0}: Error finding container c017909d00c76bf342065ca6df88166135be882e8427e6c12d465a6a216e0c1c: Status 404 returned error can't find the container with id c017909d00c76bf342065ca6df88166135be882e8427e6c12d465a6a216e0c1c Nov 24 13:39:22 crc kubenswrapper[5039]: W1124 13:39:22.078600 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc87e221f_b63d_4883_a80b_084a56305cb1.slice/crio-45c567bff4dd87433436a095f4d3f26f74faeca95acc5de83d8d59d49ee4b7cf WatchSource:0}: Error finding container 45c567bff4dd87433436a095f4d3f26f74faeca95acc5de83d8d59d49ee4b7cf: Status 404 returned error can't find the container with id 45c567bff4dd87433436a095f4d3f26f74faeca95acc5de83d8d59d49ee4b7cf Nov 24 13:39:22 crc kubenswrapper[5039]: W1124 13:39:22.083725 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcd84bf91_50f0_43ff_a40d_7973e2e54a0b.slice/crio-2d92a0eeaa5fa577d83b1b6d6c1fca6088efcaa982fa4d192b0ffcbd2d755a43 WatchSource:0}: Error finding container 
2d92a0eeaa5fa577d83b1b6d6c1fca6088efcaa982fa4d192b0ffcbd2d755a43: Status 404 returned error can't find the container with id 2d92a0eeaa5fa577d83b1b6d6c1fca6088efcaa982fa4d192b0ffcbd2d755a43 Nov 24 13:39:22 crc kubenswrapper[5039]: I1124 13:39:22.577441 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-l5gbh"] Nov 24 13:39:22 crc kubenswrapper[5039]: W1124 13:39:22.659959 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8721aea_88f2_4436_9334_b3b85b3b08ed.slice/crio-fb4cda1adec32d8861eb822492875e7dbb080d29ea16a09d772d4bba3c588f00 WatchSource:0}: Error finding container fb4cda1adec32d8861eb822492875e7dbb080d29ea16a09d772d4bba3c588f00: Status 404 returned error can't find the container with id fb4cda1adec32d8861eb822492875e7dbb080d29ea16a09d772d4bba3c588f00 Nov 24 13:39:22 crc kubenswrapper[5039]: I1124 13:39:22.660255 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-ddce-account-create-s8b2c"] Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.021039 5039 generic.go:334] "Generic (PLEG): container finished" podID="cd84bf91-50f0-43ff-a40d-7973e2e54a0b" containerID="71689e8cb95ceeba081d1afab87813324767d13b01edf7929255125eb22fbdfc" exitCode=0 Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.021093 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-n9c94" event={"ID":"cd84bf91-50f0-43ff-a40d-7973e2e54a0b","Type":"ContainerDied","Data":"71689e8cb95ceeba081d1afab87813324767d13b01edf7929255125eb22fbdfc"} Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.021385 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-n9c94" event={"ID":"cd84bf91-50f0-43ff-a40d-7973e2e54a0b","Type":"ContainerStarted","Data":"2d92a0eeaa5fa577d83b1b6d6c1fca6088efcaa982fa4d192b0ffcbd2d755a43"} Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.023442 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-ddce-account-create-s8b2c" event={"ID":"d8721aea-88f2-4436-9334-b3b85b3b08ed","Type":"ContainerStarted","Data":"fb4cda1adec32d8861eb822492875e7dbb080d29ea16a09d772d4bba3c588f00"} Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.025544 5039 generic.go:334] "Generic (PLEG): container finished" podID="c87e221f-b63d-4883-a80b-084a56305cb1" containerID="3652015e828e32069b619566e1c0a8c8a0ec3380ac56af19d81a06514dd12790" exitCode=0 Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.025711 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-8xdmb" event={"ID":"c87e221f-b63d-4883-a80b-084a56305cb1","Type":"ContainerDied","Data":"3652015e828e32069b619566e1c0a8c8a0ec3380ac56af19d81a06514dd12790"} Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.025889 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-8xdmb" event={"ID":"c87e221f-b63d-4883-a80b-084a56305cb1","Type":"ContainerStarted","Data":"45c567bff4dd87433436a095f4d3f26f74faeca95acc5de83d8d59d49ee4b7cf"} Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.027579 5039 generic.go:334] "Generic (PLEG): container finished" podID="3d87b7ee-b5fa-442b-bf05-eaf35c945ca4" containerID="0d8a1dd774dcd5386d094c01b4701a251605276874203e854d307191eadcb8c2" exitCode=0 Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.027684 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-26ef-account-create-jtkcn" 
event={"ID":"3d87b7ee-b5fa-442b-bf05-eaf35c945ca4","Type":"ContainerDied","Data":"0d8a1dd774dcd5386d094c01b4701a251605276874203e854d307191eadcb8c2"} Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.027952 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-26ef-account-create-jtkcn" event={"ID":"3d87b7ee-b5fa-442b-bf05-eaf35c945ca4","Type":"ContainerStarted","Data":"aec337ed8185093a4bca7b491ee473a5e9fb2ee1309305ac0fe9c425b57e229e"} Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.030012 5039 generic.go:334] "Generic (PLEG): container finished" podID="cfcfc56f-8643-4195-8d18-b3076deac9d4" containerID="59fc9c31e57979785ff13a5825b4efd00dc42bb056e619cac78ae9fb73a6a149" exitCode=0 Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.030181 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-683c-account-create-cddj7" event={"ID":"cfcfc56f-8643-4195-8d18-b3076deac9d4","Type":"ContainerDied","Data":"59fc9c31e57979785ff13a5825b4efd00dc42bb056e619cac78ae9fb73a6a149"} Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.030405 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-683c-account-create-cddj7" event={"ID":"cfcfc56f-8643-4195-8d18-b3076deac9d4","Type":"ContainerStarted","Data":"c017909d00c76bf342065ca6df88166135be882e8427e6c12d465a6a216e0c1c"} Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.035288 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f","Type":"ContainerStarted","Data":"3204d20471d606459102c9ececad5e0dd46923d0eb781288f2205dd4b3b6a1df"} Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.036589 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-l5gbh" event={"ID":"39bc167d-fd5d-4522-8350-8a59cd32aced","Type":"ContainerStarted","Data":"016a767abad46710f87562c36154201c4c3f5398fc5aad41e51fb36f8f5ecff5"} Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.750493 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-d56m7"] Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.752313 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-d56m7" Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.768684 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmfll\" (UniqueName: \"kubernetes.io/projected/236b7551-4b7b-4643-afe6-0bb78c880b3b-kube-api-access-jmfll\") pod \"mysqld-exporter-openstack-db-create-d56m7\" (UID: \"236b7551-4b7b-4643-afe6-0bb78c880b3b\") " pod="openstack/mysqld-exporter-openstack-db-create-d56m7" Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.768738 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/236b7551-4b7b-4643-afe6-0bb78c880b3b-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-d56m7\" (UID: \"236b7551-4b7b-4643-afe6-0bb78c880b3b\") " pod="openstack/mysqld-exporter-openstack-db-create-d56m7" Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.776653 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-d56m7"] Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.871121 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmfll\" (UniqueName: \"kubernetes.io/projected/236b7551-4b7b-4643-afe6-0bb78c880b3b-kube-api-access-jmfll\") pod \"mysqld-exporter-openstack-db-create-d56m7\" (UID: \"236b7551-4b7b-4643-afe6-0bb78c880b3b\") " pod="openstack/mysqld-exporter-openstack-db-create-d56m7" Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.871185 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/236b7551-4b7b-4643-afe6-0bb78c880b3b-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-d56m7\" (UID: \"236b7551-4b7b-4643-afe6-0bb78c880b3b\") " pod="openstack/mysqld-exporter-openstack-db-create-d56m7" Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.871904 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/236b7551-4b7b-4643-afe6-0bb78c880b3b-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-d56m7\" (UID: \"236b7551-4b7b-4643-afe6-0bb78c880b3b\") " pod="openstack/mysqld-exporter-openstack-db-create-d56m7" Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.872309 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-7c1f-account-create-hnc4b"] Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.873964 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-7c1f-account-create-hnc4b" Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.877409 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-db-secret" Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.881328 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-7c1f-account-create-hnc4b"] Nov 24 13:39:23 crc kubenswrapper[5039]: I1124 13:39:23.892695 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmfll\" (UniqueName: \"kubernetes.io/projected/236b7551-4b7b-4643-afe6-0bb78c880b3b-kube-api-access-jmfll\") pod \"mysqld-exporter-openstack-db-create-d56m7\" (UID: \"236b7551-4b7b-4643-afe6-0bb78c880b3b\") " pod="openstack/mysqld-exporter-openstack-db-create-d56m7" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.046571 5039 generic.go:334] "Generic (PLEG): container finished" podID="d8721aea-88f2-4436-9334-b3b85b3b08ed" containerID="b7b0e5e7951e46e5e5c37704f1abad05814d54068f9dc0e0be5d882c44a4a3c6" exitCode=0 Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.046647 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-ddce-account-create-s8b2c" event={"ID":"d8721aea-88f2-4436-9334-b3b85b3b08ed","Type":"ContainerDied","Data":"b7b0e5e7951e46e5e5c37704f1abad05814d54068f9dc0e0be5d882c44a4a3c6"} Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.058841 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f","Type":"ContainerStarted","Data":"63f91bc4fd74ef203911fee7441019e02f2262704abdd497c64a5f3d619cfefb"} Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.059019 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f","Type":"ContainerStarted","Data":"8b3beaa6d672a1a6e9973a1736b7d64a118b5798db76c4eeea9d2ed7def6c344"} Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.059119 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f","Type":"ContainerStarted","Data":"1eae1131b1801011e694b36ab7809da06f32c04563258513da4959ef94a9d278"} Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.061515 5039 generic.go:334] "Generic (PLEG): container finished" podID="39bc167d-fd5d-4522-8350-8a59cd32aced" containerID="146618cf69f2a66b90fb82a7947127cfa95138536f81540fbfd428fd1f89afe5" exitCode=0 Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.064191 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-l5gbh" event={"ID":"39bc167d-fd5d-4522-8350-8a59cd32aced","Type":"ContainerDied","Data":"146618cf69f2a66b90fb82a7947127cfa95138536f81540fbfd428fd1f89afe5"} Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.074543 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-d56m7" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.074854 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpj8x\" (UniqueName: \"kubernetes.io/projected/c1c38b9f-b596-43f3-83d4-9450c71fbaa6-kube-api-access-lpj8x\") pod \"mysqld-exporter-7c1f-account-create-hnc4b\" (UID: \"c1c38b9f-b596-43f3-83d4-9450c71fbaa6\") " pod="openstack/mysqld-exporter-7c1f-account-create-hnc4b" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.074927 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c1c38b9f-b596-43f3-83d4-9450c71fbaa6-operator-scripts\") pod \"mysqld-exporter-7c1f-account-create-hnc4b\" (UID: \"c1c38b9f-b596-43f3-83d4-9450c71fbaa6\") " pod="openstack/mysqld-exporter-7c1f-account-create-hnc4b" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.177328 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c1c38b9f-b596-43f3-83d4-9450c71fbaa6-operator-scripts\") pod \"mysqld-exporter-7c1f-account-create-hnc4b\" (UID: \"c1c38b9f-b596-43f3-83d4-9450c71fbaa6\") " pod="openstack/mysqld-exporter-7c1f-account-create-hnc4b" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.177656 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpj8x\" (UniqueName: \"kubernetes.io/projected/c1c38b9f-b596-43f3-83d4-9450c71fbaa6-kube-api-access-lpj8x\") pod \"mysqld-exporter-7c1f-account-create-hnc4b\" (UID: \"c1c38b9f-b596-43f3-83d4-9450c71fbaa6\") " pod="openstack/mysqld-exporter-7c1f-account-create-hnc4b" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.178440 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c1c38b9f-b596-43f3-83d4-9450c71fbaa6-operator-scripts\") pod \"mysqld-exporter-7c1f-account-create-hnc4b\" (UID: \"c1c38b9f-b596-43f3-83d4-9450c71fbaa6\") " pod="openstack/mysqld-exporter-7c1f-account-create-hnc4b" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.203936 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpj8x\" (UniqueName: \"kubernetes.io/projected/c1c38b9f-b596-43f3-83d4-9450c71fbaa6-kube-api-access-lpj8x\") pod \"mysqld-exporter-7c1f-account-create-hnc4b\" (UID: \"c1c38b9f-b596-43f3-83d4-9450c71fbaa6\") " pod="openstack/mysqld-exporter-7c1f-account-create-hnc4b" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.244737 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.244999 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="869a1d3b-808b-4a44-b300-c2fb36a07e8a" containerName="prometheus" containerID="cri-o://5ee3e91535ae6ff6ad47dbade6d8e98d9a12d0c46b4c42d1d355aa92031e2806" gracePeriod=600 Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.245071 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="869a1d3b-808b-4a44-b300-c2fb36a07e8a" containerName="thanos-sidecar" containerID="cri-o://2b0da0cd1053cb07ac6a8370d2c07235c2d94001bf1c2acbc7c295f2a6252852" gracePeriod=600 Nov 24 13:39:24 crc 
kubenswrapper[5039]: I1124 13:39:24.245083 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="869a1d3b-808b-4a44-b300-c2fb36a07e8a" containerName="config-reloader" containerID="cri-o://5602a6ec6e447635c28660407ff827df2f9bdb8d4080912e1541a6e2233bc34a" gracePeriod=600 Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.492794 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-7c1f-account-create-hnc4b" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.649067 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-26ef-account-create-jtkcn" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.670956 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-8xdmb" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.671796 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-n9c94" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.677631 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-683c-account-create-cddj7" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.690289 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cfcfc56f-8643-4195-8d18-b3076deac9d4-operator-scripts\") pod \"cfcfc56f-8643-4195-8d18-b3076deac9d4\" (UID: \"cfcfc56f-8643-4195-8d18-b3076deac9d4\") " Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.690353 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c87e221f-b63d-4883-a80b-084a56305cb1-operator-scripts\") pod \"c87e221f-b63d-4883-a80b-084a56305cb1\" (UID: \"c87e221f-b63d-4883-a80b-084a56305cb1\") " Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.690429 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd84bf91-50f0-43ff-a40d-7973e2e54a0b-operator-scripts\") pod \"cd84bf91-50f0-43ff-a40d-7973e2e54a0b\" (UID: \"cd84bf91-50f0-43ff-a40d-7973e2e54a0b\") " Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.690532 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cdmdb\" (UniqueName: \"kubernetes.io/projected/cd84bf91-50f0-43ff-a40d-7973e2e54a0b-kube-api-access-cdmdb\") pod \"cd84bf91-50f0-43ff-a40d-7973e2e54a0b\" (UID: \"cd84bf91-50f0-43ff-a40d-7973e2e54a0b\") " Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.690665 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fdtsw\" (UniqueName: \"kubernetes.io/projected/3d87b7ee-b5fa-442b-bf05-eaf35c945ca4-kube-api-access-fdtsw\") pod \"3d87b7ee-b5fa-442b-bf05-eaf35c945ca4\" (UID: \"3d87b7ee-b5fa-442b-bf05-eaf35c945ca4\") " Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.690704 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kgrtc\" (UniqueName: \"kubernetes.io/projected/c87e221f-b63d-4883-a80b-084a56305cb1-kube-api-access-kgrtc\") pod \"c87e221f-b63d-4883-a80b-084a56305cb1\" (UID: \"c87e221f-b63d-4883-a80b-084a56305cb1\") " Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.690746 5039 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d87b7ee-b5fa-442b-bf05-eaf35c945ca4-operator-scripts\") pod \"3d87b7ee-b5fa-442b-bf05-eaf35c945ca4\" (UID: \"3d87b7ee-b5fa-442b-bf05-eaf35c945ca4\") " Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.690776 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9pxlj\" (UniqueName: \"kubernetes.io/projected/cfcfc56f-8643-4195-8d18-b3076deac9d4-kube-api-access-9pxlj\") pod \"cfcfc56f-8643-4195-8d18-b3076deac9d4\" (UID: \"cfcfc56f-8643-4195-8d18-b3076deac9d4\") " Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.691601 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfcfc56f-8643-4195-8d18-b3076deac9d4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cfcfc56f-8643-4195-8d18-b3076deac9d4" (UID: "cfcfc56f-8643-4195-8d18-b3076deac9d4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.691675 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd84bf91-50f0-43ff-a40d-7973e2e54a0b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cd84bf91-50f0-43ff-a40d-7973e2e54a0b" (UID: "cd84bf91-50f0-43ff-a40d-7973e2e54a0b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.692283 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d87b7ee-b5fa-442b-bf05-eaf35c945ca4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3d87b7ee-b5fa-442b-bf05-eaf35c945ca4" (UID: "3d87b7ee-b5fa-442b-bf05-eaf35c945ca4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.696489 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfcfc56f-8643-4195-8d18-b3076deac9d4-kube-api-access-9pxlj" (OuterVolumeSpecName: "kube-api-access-9pxlj") pod "cfcfc56f-8643-4195-8d18-b3076deac9d4" (UID: "cfcfc56f-8643-4195-8d18-b3076deac9d4"). InnerVolumeSpecName "kube-api-access-9pxlj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.697158 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c87e221f-b63d-4883-a80b-084a56305cb1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c87e221f-b63d-4883-a80b-084a56305cb1" (UID: "c87e221f-b63d-4883-a80b-084a56305cb1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.697606 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d87b7ee-b5fa-442b-bf05-eaf35c945ca4-kube-api-access-fdtsw" (OuterVolumeSpecName: "kube-api-access-fdtsw") pod "3d87b7ee-b5fa-442b-bf05-eaf35c945ca4" (UID: "3d87b7ee-b5fa-442b-bf05-eaf35c945ca4"). InnerVolumeSpecName "kube-api-access-fdtsw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.700770 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c87e221f-b63d-4883-a80b-084a56305cb1-kube-api-access-kgrtc" (OuterVolumeSpecName: "kube-api-access-kgrtc") pod "c87e221f-b63d-4883-a80b-084a56305cb1" (UID: "c87e221f-b63d-4883-a80b-084a56305cb1"). InnerVolumeSpecName "kube-api-access-kgrtc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.700814 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd84bf91-50f0-43ff-a40d-7973e2e54a0b-kube-api-access-cdmdb" (OuterVolumeSpecName: "kube-api-access-cdmdb") pod "cd84bf91-50f0-43ff-a40d-7973e2e54a0b" (UID: "cd84bf91-50f0-43ff-a40d-7973e2e54a0b"). InnerVolumeSpecName "kube-api-access-cdmdb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.793720 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cdmdb\" (UniqueName: \"kubernetes.io/projected/cd84bf91-50f0-43ff-a40d-7973e2e54a0b-kube-api-access-cdmdb\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.793758 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fdtsw\" (UniqueName: \"kubernetes.io/projected/3d87b7ee-b5fa-442b-bf05-eaf35c945ca4-kube-api-access-fdtsw\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.793768 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kgrtc\" (UniqueName: \"kubernetes.io/projected/c87e221f-b63d-4883-a80b-084a56305cb1-kube-api-access-kgrtc\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.793779 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d87b7ee-b5fa-442b-bf05-eaf35c945ca4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.793790 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9pxlj\" (UniqueName: \"kubernetes.io/projected/cfcfc56f-8643-4195-8d18-b3076deac9d4-kube-api-access-9pxlj\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.793801 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cfcfc56f-8643-4195-8d18-b3076deac9d4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.793812 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c87e221f-b63d-4883-a80b-084a56305cb1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.793823 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd84bf91-50f0-43ff-a40d-7973e2e54a0b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:24 crc kubenswrapper[5039]: I1124 13:39:24.831765 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-d56m7"] Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.007198 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-7c1f-account-create-hnc4b"] 
Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.071334 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-683c-account-create-cddj7" event={"ID":"cfcfc56f-8643-4195-8d18-b3076deac9d4","Type":"ContainerDied","Data":"c017909d00c76bf342065ca6df88166135be882e8427e6c12d465a6a216e0c1c"} Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.071380 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-683c-account-create-cddj7" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.071398 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c017909d00c76bf342065ca6df88166135be882e8427e6c12d465a6a216e0c1c" Nov 24 13:39:25 crc kubenswrapper[5039]: W1124 13:39:25.095988 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc1c38b9f_b596_43f3_83d4_9450c71fbaa6.slice/crio-31c7732fc0a2a6b75f8b2054ad0c9a01009afafdea70a413870074a93b2e55b9 WatchSource:0}: Error finding container 31c7732fc0a2a6b75f8b2054ad0c9a01009afafdea70a413870074a93b2e55b9: Status 404 returned error can't find the container with id 31c7732fc0a2a6b75f8b2054ad0c9a01009afafdea70a413870074a93b2e55b9 Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.099004 5039 generic.go:334] "Generic (PLEG): container finished" podID="869a1d3b-808b-4a44-b300-c2fb36a07e8a" containerID="2b0da0cd1053cb07ac6a8370d2c07235c2d94001bf1c2acbc7c295f2a6252852" exitCode=0 Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.099032 5039 generic.go:334] "Generic (PLEG): container finished" podID="869a1d3b-808b-4a44-b300-c2fb36a07e8a" containerID="5602a6ec6e447635c28660407ff827df2f9bdb8d4080912e1541a6e2233bc34a" exitCode=0 Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.099040 5039 generic.go:334] "Generic (PLEG): container finished" podID="869a1d3b-808b-4a44-b300-c2fb36a07e8a" containerID="5ee3e91535ae6ff6ad47dbade6d8e98d9a12d0c46b4c42d1d355aa92031e2806" exitCode=0 Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.099103 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"869a1d3b-808b-4a44-b300-c2fb36a07e8a","Type":"ContainerDied","Data":"2b0da0cd1053cb07ac6a8370d2c07235c2d94001bf1c2acbc7c295f2a6252852"} Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.099129 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"869a1d3b-808b-4a44-b300-c2fb36a07e8a","Type":"ContainerDied","Data":"5602a6ec6e447635c28660407ff827df2f9bdb8d4080912e1541a6e2233bc34a"} Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.099156 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"869a1d3b-808b-4a44-b300-c2fb36a07e8a","Type":"ContainerDied","Data":"5ee3e91535ae6ff6ad47dbade6d8e98d9a12d0c46b4c42d1d355aa92031e2806"} Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.111533 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-8xdmb" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.112085 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-8xdmb" event={"ID":"c87e221f-b63d-4883-a80b-084a56305cb1","Type":"ContainerDied","Data":"45c567bff4dd87433436a095f4d3f26f74faeca95acc5de83d8d59d49ee4b7cf"} Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.112125 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="45c567bff4dd87433436a095f4d3f26f74faeca95acc5de83d8d59d49ee4b7cf" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.114522 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-26ef-account-create-jtkcn" event={"ID":"3d87b7ee-b5fa-442b-bf05-eaf35c945ca4","Type":"ContainerDied","Data":"aec337ed8185093a4bca7b491ee473a5e9fb2ee1309305ac0fe9c425b57e229e"} Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.114548 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aec337ed8185093a4bca7b491ee473a5e9fb2ee1309305ac0fe9c425b57e229e" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.114595 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-26ef-account-create-jtkcn" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.123455 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-n9c94" event={"ID":"cd84bf91-50f0-43ff-a40d-7973e2e54a0b","Type":"ContainerDied","Data":"2d92a0eeaa5fa577d83b1b6d6c1fca6088efcaa982fa4d192b0ffcbd2d755a43"} Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.123494 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2d92a0eeaa5fa577d83b1b6d6c1fca6088efcaa982fa4d192b0ffcbd2d755a43" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.123461 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-n9c94" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.204327 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.300766 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/869a1d3b-808b-4a44-b300-c2fb36a07e8a-web-config\") pod \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.301054 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/869a1d3b-808b-4a44-b300-c2fb36a07e8a-tls-assets\") pod \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.301133 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\") pod \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.301171 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2h7ww\" (UniqueName: \"kubernetes.io/projected/869a1d3b-808b-4a44-b300-c2fb36a07e8a-kube-api-access-2h7ww\") pod \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.301227 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/869a1d3b-808b-4a44-b300-c2fb36a07e8a-thanos-prometheus-http-client-file\") pod \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.301254 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/869a1d3b-808b-4a44-b300-c2fb36a07e8a-config-out\") pod \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.301282 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/869a1d3b-808b-4a44-b300-c2fb36a07e8a-prometheus-metric-storage-rulefiles-0\") pod \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.301338 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/869a1d3b-808b-4a44-b300-c2fb36a07e8a-config\") pod \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\" (UID: \"869a1d3b-808b-4a44-b300-c2fb36a07e8a\") " Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.306216 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/869a1d3b-808b-4a44-b300-c2fb36a07e8a-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "869a1d3b-808b-4a44-b300-c2fb36a07e8a" (UID: "869a1d3b-808b-4a44-b300-c2fb36a07e8a"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.310731 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/869a1d3b-808b-4a44-b300-c2fb36a07e8a-config" (OuterVolumeSpecName: "config") pod "869a1d3b-808b-4a44-b300-c2fb36a07e8a" (UID: "869a1d3b-808b-4a44-b300-c2fb36a07e8a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.311099 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/869a1d3b-808b-4a44-b300-c2fb36a07e8a-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "869a1d3b-808b-4a44-b300-c2fb36a07e8a" (UID: "869a1d3b-808b-4a44-b300-c2fb36a07e8a"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.321168 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/869a1d3b-808b-4a44-b300-c2fb36a07e8a-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "869a1d3b-808b-4a44-b300-c2fb36a07e8a" (UID: "869a1d3b-808b-4a44-b300-c2fb36a07e8a"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.326118 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/869a1d3b-808b-4a44-b300-c2fb36a07e8a-kube-api-access-2h7ww" (OuterVolumeSpecName: "kube-api-access-2h7ww") pod "869a1d3b-808b-4a44-b300-c2fb36a07e8a" (UID: "869a1d3b-808b-4a44-b300-c2fb36a07e8a"). InnerVolumeSpecName "kube-api-access-2h7ww". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.331280 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/869a1d3b-808b-4a44-b300-c2fb36a07e8a-config-out" (OuterVolumeSpecName: "config-out") pod "869a1d3b-808b-4a44-b300-c2fb36a07e8a" (UID: "869a1d3b-808b-4a44-b300-c2fb36a07e8a"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.347106 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "869a1d3b-808b-4a44-b300-c2fb36a07e8a" (UID: "869a1d3b-808b-4a44-b300-c2fb36a07e8a"). InnerVolumeSpecName "pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.371434 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/869a1d3b-808b-4a44-b300-c2fb36a07e8a-web-config" (OuterVolumeSpecName: "web-config") pod "869a1d3b-808b-4a44-b300-c2fb36a07e8a" (UID: "869a1d3b-808b-4a44-b300-c2fb36a07e8a"). InnerVolumeSpecName "web-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.410162 5039 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/869a1d3b-808b-4a44-b300-c2fb36a07e8a-web-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.410209 5039 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/869a1d3b-808b-4a44-b300-c2fb36a07e8a-tls-assets\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.410237 5039 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\") on node \"crc\" " Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.410252 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2h7ww\" (UniqueName: \"kubernetes.io/projected/869a1d3b-808b-4a44-b300-c2fb36a07e8a-kube-api-access-2h7ww\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.410280 5039 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/869a1d3b-808b-4a44-b300-c2fb36a07e8a-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.410289 5039 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/869a1d3b-808b-4a44-b300-c2fb36a07e8a-config-out\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.410298 5039 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/869a1d3b-808b-4a44-b300-c2fb36a07e8a-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.410307 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/869a1d3b-808b-4a44-b300-c2fb36a07e8a-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.522454 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-l5gbh" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.525087 5039 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.525273 5039 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc") on node "crc" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.615008 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k9z2d\" (UniqueName: \"kubernetes.io/projected/39bc167d-fd5d-4522-8350-8a59cd32aced-kube-api-access-k9z2d\") pod \"39bc167d-fd5d-4522-8350-8a59cd32aced\" (UID: \"39bc167d-fd5d-4522-8350-8a59cd32aced\") " Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.615106 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39bc167d-fd5d-4522-8350-8a59cd32aced-operator-scripts\") pod \"39bc167d-fd5d-4522-8350-8a59cd32aced\" (UID: \"39bc167d-fd5d-4522-8350-8a59cd32aced\") " Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.615528 5039 reconciler_common.go:293] "Volume detached for volume \"pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.616009 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39bc167d-fd5d-4522-8350-8a59cd32aced-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "39bc167d-fd5d-4522-8350-8a59cd32aced" (UID: "39bc167d-fd5d-4522-8350-8a59cd32aced"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.621401 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39bc167d-fd5d-4522-8350-8a59cd32aced-kube-api-access-k9z2d" (OuterVolumeSpecName: "kube-api-access-k9z2d") pod "39bc167d-fd5d-4522-8350-8a59cd32aced" (UID: "39bc167d-fd5d-4522-8350-8a59cd32aced"). InnerVolumeSpecName "kube-api-access-k9z2d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.717101 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39bc167d-fd5d-4522-8350-8a59cd32aced-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.717442 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k9z2d\" (UniqueName: \"kubernetes.io/projected/39bc167d-fd5d-4522-8350-8a59cd32aced-kube-api-access-k9z2d\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.879216 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-ddce-account-create-s8b2c" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.920131 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8721aea-88f2-4436-9334-b3b85b3b08ed-operator-scripts\") pod \"d8721aea-88f2-4436-9334-b3b85b3b08ed\" (UID: \"d8721aea-88f2-4436-9334-b3b85b3b08ed\") " Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.920215 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-99qcb\" (UniqueName: \"kubernetes.io/projected/d8721aea-88f2-4436-9334-b3b85b3b08ed-kube-api-access-99qcb\") pod \"d8721aea-88f2-4436-9334-b3b85b3b08ed\" (UID: \"d8721aea-88f2-4436-9334-b3b85b3b08ed\") " Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.921018 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8721aea-88f2-4436-9334-b3b85b3b08ed-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d8721aea-88f2-4436-9334-b3b85b3b08ed" (UID: "d8721aea-88f2-4436-9334-b3b85b3b08ed"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:25 crc kubenswrapper[5039]: I1124 13:39:25.929173 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8721aea-88f2-4436-9334-b3b85b3b08ed-kube-api-access-99qcb" (OuterVolumeSpecName: "kube-api-access-99qcb") pod "d8721aea-88f2-4436-9334-b3b85b3b08ed" (UID: "d8721aea-88f2-4436-9334-b3b85b3b08ed"). InnerVolumeSpecName "kube-api-access-99qcb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.022693 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8721aea-88f2-4436-9334-b3b85b3b08ed-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.022729 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-99qcb\" (UniqueName: \"kubernetes.io/projected/d8721aea-88f2-4436-9334-b3b85b3b08ed-kube-api-access-99qcb\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.139244 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f","Type":"ContainerStarted","Data":"938364d7cd9ab74683263a13c9415d102145e4881074d2e97e0540dc0462d65e"} Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.139287 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f","Type":"ContainerStarted","Data":"d25572e131c6ac8084dc2789c89869e283e3bf2d1b58323e0c2101991fd9e60d"} Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.145926 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-l5gbh" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.145949 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-l5gbh" event={"ID":"39bc167d-fd5d-4522-8350-8a59cd32aced","Type":"ContainerDied","Data":"016a767abad46710f87562c36154201c4c3f5398fc5aad41e51fb36f8f5ecff5"} Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.146003 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="016a767abad46710f87562c36154201c4c3f5398fc5aad41e51fb36f8f5ecff5" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.148369 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-ddce-account-create-s8b2c" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.148366 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-ddce-account-create-s8b2c" event={"ID":"d8721aea-88f2-4436-9334-b3b85b3b08ed","Type":"ContainerDied","Data":"fb4cda1adec32d8861eb822492875e7dbb080d29ea16a09d772d4bba3c588f00"} Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.148539 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fb4cda1adec32d8861eb822492875e7dbb080d29ea16a09d772d4bba3c588f00" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.151440 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"869a1d3b-808b-4a44-b300-c2fb36a07e8a","Type":"ContainerDied","Data":"72bb41cc9891f48cd49424f09bb45de761eafe79011de41ed70441a2461d3a03"} Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.151494 5039 scope.go:117] "RemoveContainer" containerID="2b0da0cd1053cb07ac6a8370d2c07235c2d94001bf1c2acbc7c295f2a6252852" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.151684 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.155193 5039 generic.go:334] "Generic (PLEG): container finished" podID="c1c38b9f-b596-43f3-83d4-9450c71fbaa6" containerID="084452c6f5566811db74a3e646de966ab89f1bb00652cc545c4f9ef039dd9133" exitCode=0 Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.155268 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-7c1f-account-create-hnc4b" event={"ID":"c1c38b9f-b596-43f3-83d4-9450c71fbaa6","Type":"ContainerDied","Data":"084452c6f5566811db74a3e646de966ab89f1bb00652cc545c4f9ef039dd9133"} Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.155301 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-7c1f-account-create-hnc4b" event={"ID":"c1c38b9f-b596-43f3-83d4-9450c71fbaa6","Type":"ContainerStarted","Data":"31c7732fc0a2a6b75f8b2054ad0c9a01009afafdea70a413870074a93b2e55b9"} Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.158675 5039 generic.go:334] "Generic (PLEG): container finished" podID="236b7551-4b7b-4643-afe6-0bb78c880b3b" containerID="d63debaf15a8e0d7912888d168410b64e2023f34b3992cb476eb43d389a4f94b" exitCode=0 Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.158732 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-d56m7" event={"ID":"236b7551-4b7b-4643-afe6-0bb78c880b3b","Type":"ContainerDied","Data":"d63debaf15a8e0d7912888d168410b64e2023f34b3992cb476eb43d389a4f94b"} Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.158811 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-d56m7" event={"ID":"236b7551-4b7b-4643-afe6-0bb78c880b3b","Type":"ContainerStarted","Data":"4f3e08507c8df4af62abee64e98720d30269192a2e4bf11233e58d42f4fdb6c9"} Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.178779 5039 scope.go:117] "RemoveContainer" containerID="5602a6ec6e447635c28660407ff827df2f9bdb8d4080912e1541a6e2233bc34a" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.212492 5039 scope.go:117] "RemoveContainer" containerID="5ee3e91535ae6ff6ad47dbade6d8e98d9a12d0c46b4c42d1d355aa92031e2806" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.217921 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.235688 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.239063 5039 scope.go:117] "RemoveContainer" containerID="dade2c8a2b5700d9f5635d510ff78ad8e50a7ecd29fb2bd3c7e68e7df9a834ff" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.244691 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 24 13:39:26 crc kubenswrapper[5039]: E1124 13:39:26.245066 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d87b7ee-b5fa-442b-bf05-eaf35c945ca4" containerName="mariadb-account-create" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.245085 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d87b7ee-b5fa-442b-bf05-eaf35c945ca4" containerName="mariadb-account-create" Nov 24 13:39:26 crc kubenswrapper[5039]: E1124 13:39:26.245102 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="869a1d3b-808b-4a44-b300-c2fb36a07e8a" containerName="init-config-reloader" Nov 24 13:39:26 crc 
kubenswrapper[5039]: I1124 13:39:26.245110 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="869a1d3b-808b-4a44-b300-c2fb36a07e8a" containerName="init-config-reloader" Nov 24 13:39:26 crc kubenswrapper[5039]: E1124 13:39:26.245126 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8721aea-88f2-4436-9334-b3b85b3b08ed" containerName="mariadb-account-create" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.245135 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8721aea-88f2-4436-9334-b3b85b3b08ed" containerName="mariadb-account-create" Nov 24 13:39:26 crc kubenswrapper[5039]: E1124 13:39:26.245142 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd84bf91-50f0-43ff-a40d-7973e2e54a0b" containerName="mariadb-database-create" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.245149 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd84bf91-50f0-43ff-a40d-7973e2e54a0b" containerName="mariadb-database-create" Nov 24 13:39:26 crc kubenswrapper[5039]: E1124 13:39:26.245163 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="869a1d3b-808b-4a44-b300-c2fb36a07e8a" containerName="prometheus" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.245168 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="869a1d3b-808b-4a44-b300-c2fb36a07e8a" containerName="prometheus" Nov 24 13:39:26 crc kubenswrapper[5039]: E1124 13:39:26.245182 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c87e221f-b63d-4883-a80b-084a56305cb1" containerName="mariadb-database-create" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.245188 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="c87e221f-b63d-4883-a80b-084a56305cb1" containerName="mariadb-database-create" Nov 24 13:39:26 crc kubenswrapper[5039]: E1124 13:39:26.245198 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfcfc56f-8643-4195-8d18-b3076deac9d4" containerName="mariadb-account-create" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.245205 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfcfc56f-8643-4195-8d18-b3076deac9d4" containerName="mariadb-account-create" Nov 24 13:39:26 crc kubenswrapper[5039]: E1124 13:39:26.245215 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="869a1d3b-808b-4a44-b300-c2fb36a07e8a" containerName="thanos-sidecar" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.245221 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="869a1d3b-808b-4a44-b300-c2fb36a07e8a" containerName="thanos-sidecar" Nov 24 13:39:26 crc kubenswrapper[5039]: E1124 13:39:26.245230 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="869a1d3b-808b-4a44-b300-c2fb36a07e8a" containerName="config-reloader" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.245235 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="869a1d3b-808b-4a44-b300-c2fb36a07e8a" containerName="config-reloader" Nov 24 13:39:26 crc kubenswrapper[5039]: E1124 13:39:26.245254 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39bc167d-fd5d-4522-8350-8a59cd32aced" containerName="mariadb-database-create" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.245260 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="39bc167d-fd5d-4522-8350-8a59cd32aced" containerName="mariadb-database-create" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.245468 5039 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="cd84bf91-50f0-43ff-a40d-7973e2e54a0b" containerName="mariadb-database-create" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.245489 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="869a1d3b-808b-4a44-b300-c2fb36a07e8a" containerName="config-reloader" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.245552 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="869a1d3b-808b-4a44-b300-c2fb36a07e8a" containerName="thanos-sidecar" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.245570 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfcfc56f-8643-4195-8d18-b3076deac9d4" containerName="mariadb-account-create" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.245584 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8721aea-88f2-4436-9334-b3b85b3b08ed" containerName="mariadb-account-create" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.245594 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d87b7ee-b5fa-442b-bf05-eaf35c945ca4" containerName="mariadb-account-create" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.245617 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="869a1d3b-808b-4a44-b300-c2fb36a07e8a" containerName="prometheus" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.245628 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="39bc167d-fd5d-4522-8350-8a59cd32aced" containerName="mariadb-database-create" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.245642 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="c87e221f-b63d-4883-a80b-084a56305cb1" containerName="mariadb-database-create" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.247285 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.249373 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-w4dz7" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.249646 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.250561 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.250722 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.250835 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.251825 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.263723 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.302946 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.325280 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="869a1d3b-808b-4a44-b300-c2fb36a07e8a" path="/var/lib/kubelet/pods/869a1d3b-808b-4a44-b300-c2fb36a07e8a/volumes" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.328257 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/54819035-007f-4162-9419-d825f50e1ce9-config\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.328362 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.328421 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/54819035-007f-4162-9419-d825f50e1ce9-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.328518 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/54819035-007f-4162-9419-d825f50e1ce9-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " 
pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.328587 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/54819035-007f-4162-9419-d825f50e1ce9-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.328670 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sb8kt\" (UniqueName: \"kubernetes.io/projected/54819035-007f-4162-9419-d825f50e1ce9-kube-api-access-sb8kt\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.328716 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54819035-007f-4162-9419-d825f50e1ce9-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.331605 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/54819035-007f-4162-9419-d825f50e1ce9-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.332340 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/54819035-007f-4162-9419-d825f50e1ce9-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.332376 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/54819035-007f-4162-9419-d825f50e1ce9-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.332402 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/54819035-007f-4162-9419-d825f50e1ce9-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.434480 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sb8kt\" (UniqueName: \"kubernetes.io/projected/54819035-007f-4162-9419-d825f50e1ce9-kube-api-access-sb8kt\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.434545 5039 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54819035-007f-4162-9419-d825f50e1ce9-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.434572 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/54819035-007f-4162-9419-d825f50e1ce9-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.434624 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/54819035-007f-4162-9419-d825f50e1ce9-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.434652 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/54819035-007f-4162-9419-d825f50e1ce9-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.434695 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/54819035-007f-4162-9419-d825f50e1ce9-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.434723 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/54819035-007f-4162-9419-d825f50e1ce9-config\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.434749 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.434798 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/54819035-007f-4162-9419-d825f50e1ce9-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.434873 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/54819035-007f-4162-9419-d825f50e1ce9-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod 
\"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.434924 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/54819035-007f-4162-9419-d825f50e1ce9-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.438387 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/54819035-007f-4162-9419-d825f50e1ce9-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.438515 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/54819035-007f-4162-9419-d825f50e1ce9-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.439136 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/54819035-007f-4162-9419-d825f50e1ce9-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.439604 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54819035-007f-4162-9419-d825f50e1ce9-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.439677 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/54819035-007f-4162-9419-d825f50e1ce9-config\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.459669 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/54819035-007f-4162-9419-d825f50e1ce9-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.459698 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/54819035-007f-4162-9419-d825f50e1ce9-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.460533 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: 
\"kubernetes.io/secret/54819035-007f-4162-9419-d825f50e1ce9-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.461035 5039 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.461187 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f0fdd26626b161b6dd0cbc6930a2dd292876373bbfce0d53987e82de5f5e1a8b/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.462439 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/54819035-007f-4162-9419-d825f50e1ce9-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.468834 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sb8kt\" (UniqueName: \"kubernetes.io/projected/54819035-007f-4162-9419-d825f50e1ce9-kube-api-access-sb8kt\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.505319 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aa3bc8bd-7941-4764-9d9c-bc9464f639dc\") pod \"prometheus-metric-storage-0\" (UID: \"54819035-007f-4162-9419-d825f50e1ce9\") " pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:26 crc kubenswrapper[5039]: I1124 13:39:26.621698 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.140440 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.182396 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"54819035-007f-4162-9419-d825f50e1ce9","Type":"ContainerStarted","Data":"c6d74dc5bed183f82cae22c99069e3b01cd3a11ff8a0795a9a84b1617b6ff32b"} Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.194415 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f","Type":"ContainerStarted","Data":"7d331b868e2db5a1f1ba756e59bd4c35ed0031640a5c6092f5cd67091db24d57"} Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.194474 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f","Type":"ContainerStarted","Data":"c55b18a560ac9a54aab6ff9b2479f200bb4c7f8cff9cca80d6ff0150b0e99f29"} Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.194488 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f","Type":"ContainerStarted","Data":"a5b581b2881f4a5237fb1df8a1fb505cdbc23106c89afc5c5a6012835fcdfd48"} Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.194535 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f","Type":"ContainerStarted","Data":"fc354eeea69f9dfe597b4728c9c0d6e26e4cdb032311eabf2f1742b595050043"} Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.494850 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-d56m7" Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.533828 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-7c1f-account-create-hnc4b" Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.564065 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lpj8x\" (UniqueName: \"kubernetes.io/projected/c1c38b9f-b596-43f3-83d4-9450c71fbaa6-kube-api-access-lpj8x\") pod \"c1c38b9f-b596-43f3-83d4-9450c71fbaa6\" (UID: \"c1c38b9f-b596-43f3-83d4-9450c71fbaa6\") " Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.564112 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c1c38b9f-b596-43f3-83d4-9450c71fbaa6-operator-scripts\") pod \"c1c38b9f-b596-43f3-83d4-9450c71fbaa6\" (UID: \"c1c38b9f-b596-43f3-83d4-9450c71fbaa6\") " Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.564316 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/236b7551-4b7b-4643-afe6-0bb78c880b3b-operator-scripts\") pod \"236b7551-4b7b-4643-afe6-0bb78c880b3b\" (UID: \"236b7551-4b7b-4643-afe6-0bb78c880b3b\") " Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.564374 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jmfll\" (UniqueName: \"kubernetes.io/projected/236b7551-4b7b-4643-afe6-0bb78c880b3b-kube-api-access-jmfll\") pod \"236b7551-4b7b-4643-afe6-0bb78c880b3b\" (UID: \"236b7551-4b7b-4643-afe6-0bb78c880b3b\") " Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.569874 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/236b7551-4b7b-4643-afe6-0bb78c880b3b-kube-api-access-jmfll" (OuterVolumeSpecName: "kube-api-access-jmfll") pod "236b7551-4b7b-4643-afe6-0bb78c880b3b" (UID: "236b7551-4b7b-4643-afe6-0bb78c880b3b"). InnerVolumeSpecName "kube-api-access-jmfll". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.570359 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/236b7551-4b7b-4643-afe6-0bb78c880b3b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "236b7551-4b7b-4643-afe6-0bb78c880b3b" (UID: "236b7551-4b7b-4643-afe6-0bb78c880b3b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.570408 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c1c38b9f-b596-43f3-83d4-9450c71fbaa6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c1c38b9f-b596-43f3-83d4-9450c71fbaa6" (UID: "c1c38b9f-b596-43f3-83d4-9450c71fbaa6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.572378 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.572750 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1c38b9f-b596-43f3-83d4-9450c71fbaa6-kube-api-access-lpj8x" (OuterVolumeSpecName: "kube-api-access-lpj8x") pod "c1c38b9f-b596-43f3-83d4-9450c71fbaa6" (UID: "c1c38b9f-b596-43f3-83d4-9450c71fbaa6"). InnerVolumeSpecName "kube-api-access-lpj8x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.666719 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jmfll\" (UniqueName: \"kubernetes.io/projected/236b7551-4b7b-4643-afe6-0bb78c880b3b-kube-api-access-jmfll\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.666760 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lpj8x\" (UniqueName: \"kubernetes.io/projected/c1c38b9f-b596-43f3-83d4-9450c71fbaa6-kube-api-access-lpj8x\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.666773 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c1c38b9f-b596-43f3-83d4-9450c71fbaa6-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.666785 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/236b7551-4b7b-4643-afe6-0bb78c880b3b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.906393 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-f9m74"] Nov 24 13:39:27 crc kubenswrapper[5039]: E1124 13:39:27.907183 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1c38b9f-b596-43f3-83d4-9450c71fbaa6" containerName="mariadb-account-create" Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.907211 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1c38b9f-b596-43f3-83d4-9450c71fbaa6" containerName="mariadb-account-create" Nov 24 13:39:27 crc kubenswrapper[5039]: E1124 13:39:27.907257 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="236b7551-4b7b-4643-afe6-0bb78c880b3b" containerName="mariadb-database-create" Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.907265 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="236b7551-4b7b-4643-afe6-0bb78c880b3b" containerName="mariadb-database-create" Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.907484 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1c38b9f-b596-43f3-83d4-9450c71fbaa6" containerName="mariadb-account-create" Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.907539 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="236b7551-4b7b-4643-afe6-0bb78c880b3b" containerName="mariadb-database-create" Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.908328 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-f9m74" Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.928984 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-f9m74"] Nov 24 13:39:27 crc kubenswrapper[5039]: I1124 13:39:27.982714 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.013432 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-bdknn"] Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.014789 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-bdknn" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.023783 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-8c2b-account-create-dcfff"] Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.025361 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-8c2b-account-create-dcfff" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.029298 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.036416 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-bdknn"] Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.044166 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="869a1d3b-808b-4a44-b300-c2fb36a07e8a" containerName="prometheus" probeResult="failure" output="Get \"http://10.217.0.130:9090/-/ready\": dial tcp 10.217.0.130:9090: i/o timeout (Client.Timeout exceeded while awaiting headers)" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.046668 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-8c2b-account-create-dcfff"] Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.072389 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4mzhc\" (UniqueName: \"kubernetes.io/projected/739c6211-e4b4-4386-9d63-c9b680eb9114-kube-api-access-4mzhc\") pod \"heat-db-create-f9m74\" (UID: \"739c6211-e4b4-4386-9d63-c9b680eb9114\") " pod="openstack/heat-db-create-f9m74" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.072626 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/739c6211-e4b4-4386-9d63-c9b680eb9114-operator-scripts\") pod \"heat-db-create-f9m74\" (UID: \"739c6211-e4b4-4386-9d63-c9b680eb9114\") " pod="openstack/heat-db-create-f9m74" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.134082 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-h8pnd"] Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.135471 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-h8pnd" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.171610 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-d1ff-account-create-xsvvf"] Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.172862 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-d1ff-account-create-xsvvf" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.174398 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7xbs\" (UniqueName: \"kubernetes.io/projected/c56ef76f-5741-4430-8973-4c035fc82525-kube-api-access-q7xbs\") pod \"cinder-db-create-bdknn\" (UID: \"c56ef76f-5741-4430-8973-4c035fc82525\") " pod="openstack/cinder-db-create-bdknn" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.174513 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/739c6211-e4b4-4386-9d63-c9b680eb9114-operator-scripts\") pod \"heat-db-create-f9m74\" (UID: \"739c6211-e4b4-4386-9d63-c9b680eb9114\") " pod="openstack/heat-db-create-f9m74" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.174537 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d373e37d-246d-4b90-863b-b224a059c4e1-operator-scripts\") pod \"cinder-8c2b-account-create-dcfff\" (UID: \"d373e37d-246d-4b90-863b-b224a059c4e1\") " pod="openstack/cinder-8c2b-account-create-dcfff" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.174562 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4mzhc\" (UniqueName: \"kubernetes.io/projected/739c6211-e4b4-4386-9d63-c9b680eb9114-kube-api-access-4mzhc\") pod \"heat-db-create-f9m74\" (UID: \"739c6211-e4b4-4386-9d63-c9b680eb9114\") " pod="openstack/heat-db-create-f9m74" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.174614 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnplt\" (UniqueName: \"kubernetes.io/projected/d373e37d-246d-4b90-863b-b224a059c4e1-kube-api-access-hnplt\") pod \"cinder-8c2b-account-create-dcfff\" (UID: \"d373e37d-246d-4b90-863b-b224a059c4e1\") " pod="openstack/cinder-8c2b-account-create-dcfff" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.174664 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c56ef76f-5741-4430-8973-4c035fc82525-operator-scripts\") pod \"cinder-db-create-bdknn\" (UID: \"c56ef76f-5741-4430-8973-4c035fc82525\") " pod="openstack/cinder-db-create-bdknn" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.175994 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/739c6211-e4b4-4386-9d63-c9b680eb9114-operator-scripts\") pod \"heat-db-create-f9m74\" (UID: \"739c6211-e4b4-4386-9d63-c9b680eb9114\") " pod="openstack/heat-db-create-f9m74" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.186886 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.197817 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-h8pnd"] Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.210227 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-d1ff-account-create-xsvvf"] Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.265579 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f","Type":"ContainerStarted","Data":"6d6b7c9d980786178595b9708c082d11aa7d888f66aa17ac34b01270b327cc73"} Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.286398 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9542p\" (UniqueName: \"kubernetes.io/projected/0016dd31-1097-438e-9197-bd3f5c9659d3-kube-api-access-9542p\") pod \"barbican-db-create-h8pnd\" (UID: \"0016dd31-1097-438e-9197-bd3f5c9659d3\") " pod="openstack/barbican-db-create-h8pnd" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.286698 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c56ef76f-5741-4430-8973-4c035fc82525-operator-scripts\") pod \"cinder-db-create-bdknn\" (UID: \"c56ef76f-5741-4430-8973-4c035fc82525\") " pod="openstack/cinder-db-create-bdknn" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.286808 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7672\" (UniqueName: \"kubernetes.io/projected/b861c52a-7fd3-4027-931a-624b4149e21b-kube-api-access-k7672\") pod \"heat-d1ff-account-create-xsvvf\" (UID: \"b861c52a-7fd3-4027-931a-624b4149e21b\") " pod="openstack/heat-d1ff-account-create-xsvvf" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.286898 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7xbs\" (UniqueName: \"kubernetes.io/projected/c56ef76f-5741-4430-8973-4c035fc82525-kube-api-access-q7xbs\") pod \"cinder-db-create-bdknn\" (UID: \"c56ef76f-5741-4430-8973-4c035fc82525\") " pod="openstack/cinder-db-create-bdknn" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.286979 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0016dd31-1097-438e-9197-bd3f5c9659d3-operator-scripts\") pod \"barbican-db-create-h8pnd\" (UID: \"0016dd31-1097-438e-9197-bd3f5c9659d3\") " pod="openstack/barbican-db-create-h8pnd" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.287113 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d373e37d-246d-4b90-863b-b224a059c4e1-operator-scripts\") pod \"cinder-8c2b-account-create-dcfff\" (UID: \"d373e37d-246d-4b90-863b-b224a059c4e1\") " pod="openstack/cinder-8c2b-account-create-dcfff" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.290186 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnplt\" (UniqueName: \"kubernetes.io/projected/d373e37d-246d-4b90-863b-b224a059c4e1-kube-api-access-hnplt\") pod \"cinder-8c2b-account-create-dcfff\" (UID: \"d373e37d-246d-4b90-863b-b224a059c4e1\") " pod="openstack/cinder-8c2b-account-create-dcfff" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.290193 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c56ef76f-5741-4430-8973-4c035fc82525-operator-scripts\") pod \"cinder-db-create-bdknn\" (UID: \"c56ef76f-5741-4430-8973-4c035fc82525\") " pod="openstack/cinder-db-create-bdknn" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.290270 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/d373e37d-246d-4b90-863b-b224a059c4e1-operator-scripts\") pod \"cinder-8c2b-account-create-dcfff\" (UID: \"d373e37d-246d-4b90-863b-b224a059c4e1\") " pod="openstack/cinder-8c2b-account-create-dcfff" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.290426 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-7c1f-account-create-hnc4b" event={"ID":"c1c38b9f-b596-43f3-83d4-9450c71fbaa6","Type":"ContainerDied","Data":"31c7732fc0a2a6b75f8b2054ad0c9a01009afafdea70a413870074a93b2e55b9"} Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.290461 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="31c7732fc0a2a6b75f8b2054ad0c9a01009afafdea70a413870074a93b2e55b9" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.290481 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-7c1f-account-create-hnc4b" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.291642 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b861c52a-7fd3-4027-931a-624b4149e21b-operator-scripts\") pod \"heat-d1ff-account-create-xsvvf\" (UID: \"b861c52a-7fd3-4027-931a-624b4149e21b\") " pod="openstack/heat-d1ff-account-create-xsvvf" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.292833 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-d56m7" event={"ID":"236b7551-4b7b-4643-afe6-0bb78c880b3b","Type":"ContainerDied","Data":"4f3e08507c8df4af62abee64e98720d30269192a2e4bf11233e58d42f4fdb6c9"} Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.293034 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4f3e08507c8df4af62abee64e98720d30269192a2e4bf11233e58d42f4fdb6c9" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.292990 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-d56m7" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.333203 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7xbs\" (UniqueName: \"kubernetes.io/projected/c56ef76f-5741-4430-8973-4c035fc82525-kube-api-access-q7xbs\") pod \"cinder-db-create-bdknn\" (UID: \"c56ef76f-5741-4430-8973-4c035fc82525\") " pod="openstack/cinder-db-create-bdknn" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.343079 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4mzhc\" (UniqueName: \"kubernetes.io/projected/739c6211-e4b4-4386-9d63-c9b680eb9114-kube-api-access-4mzhc\") pod \"heat-db-create-f9m74\" (UID: \"739c6211-e4b4-4386-9d63-c9b680eb9114\") " pod="openstack/heat-db-create-f9m74" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.357262 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-bdknn" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.362158 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnplt\" (UniqueName: \"kubernetes.io/projected/d373e37d-246d-4b90-863b-b224a059c4e1-kube-api-access-hnplt\") pod \"cinder-8c2b-account-create-dcfff\" (UID: \"d373e37d-246d-4b90-863b-b224a059c4e1\") " pod="openstack/cinder-8c2b-account-create-dcfff" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.393903 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b861c52a-7fd3-4027-931a-624b4149e21b-operator-scripts\") pod \"heat-d1ff-account-create-xsvvf\" (UID: \"b861c52a-7fd3-4027-931a-624b4149e21b\") " pod="openstack/heat-d1ff-account-create-xsvvf" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.393973 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9542p\" (UniqueName: \"kubernetes.io/projected/0016dd31-1097-438e-9197-bd3f5c9659d3-kube-api-access-9542p\") pod \"barbican-db-create-h8pnd\" (UID: \"0016dd31-1097-438e-9197-bd3f5c9659d3\") " pod="openstack/barbican-db-create-h8pnd" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.394034 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7672\" (UniqueName: \"kubernetes.io/projected/b861c52a-7fd3-4027-931a-624b4149e21b-kube-api-access-k7672\") pod \"heat-d1ff-account-create-xsvvf\" (UID: \"b861c52a-7fd3-4027-931a-624b4149e21b\") " pod="openstack/heat-d1ff-account-create-xsvvf" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.394075 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0016dd31-1097-438e-9197-bd3f5c9659d3-operator-scripts\") pod \"barbican-db-create-h8pnd\" (UID: \"0016dd31-1097-438e-9197-bd3f5c9659d3\") " pod="openstack/barbican-db-create-h8pnd" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.418832 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0016dd31-1097-438e-9197-bd3f5c9659d3-operator-scripts\") pod \"barbican-db-create-h8pnd\" (UID: \"0016dd31-1097-438e-9197-bd3f5c9659d3\") " pod="openstack/barbican-db-create-h8pnd" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.420037 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b861c52a-7fd3-4027-931a-624b4149e21b-operator-scripts\") pod \"heat-d1ff-account-create-xsvvf\" (UID: \"b861c52a-7fd3-4027-931a-624b4149e21b\") " pod="openstack/heat-d1ff-account-create-xsvvf" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.443107 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-b4tnp"] Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.445850 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-b4tnp"] Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.445880 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-9c10-account-create-h7m7w"] Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.446086 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-b4tnp" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.446122 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7672\" (UniqueName: \"kubernetes.io/projected/b861c52a-7fd3-4027-931a-624b4149e21b-kube-api-access-k7672\") pod \"heat-d1ff-account-create-xsvvf\" (UID: \"b861c52a-7fd3-4027-931a-624b4149e21b\") " pod="openstack/heat-d1ff-account-create-xsvvf" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.447190 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-9c10-account-create-h7m7w" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.457077 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9542p\" (UniqueName: \"kubernetes.io/projected/0016dd31-1097-438e-9197-bd3f5c9659d3-kube-api-access-9542p\") pod \"barbican-db-create-h8pnd\" (UID: \"0016dd31-1097-438e-9197-bd3f5c9659d3\") " pod="openstack/barbican-db-create-h8pnd" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.458134 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.462924 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.463093 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.463213 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.463412 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-jsx5z" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.471626 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-9c10-account-create-h7m7w"] Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.485075 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-h8pnd" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.485607 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=37.919816482 podStartE2EDuration="45.485585615s" podCreationTimestamp="2025-11-24 13:38:43 +0000 UTC" firstStartedPulling="2025-11-24 13:39:17.683147914 +0000 UTC m=+1270.122272424" lastFinishedPulling="2025-11-24 13:39:25.248917067 +0000 UTC m=+1277.688041557" observedRunningTime="2025-11-24 13:39:28.363672668 +0000 UTC m=+1280.802797168" watchObservedRunningTime="2025-11-24 13:39:28.485585615 +0000 UTC m=+1280.924710105" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.508892 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-d1ff-account-create-xsvvf" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.534134 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-f9m74" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.564585 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-8hph9"] Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.566205 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-8hph9" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.606398 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxmf9\" (UniqueName: \"kubernetes.io/projected/f3ad43f7-b71c-4cd6-ad76-93f881fe820d-kube-api-access-fxmf9\") pod \"neutron-db-create-8hph9\" (UID: \"f3ad43f7-b71c-4cd6-ad76-93f881fe820d\") " pod="openstack/neutron-db-create-8hph9" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.606450 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2dbb3128-e4f7-4ff1-bc59-7873deed9a52-config-data\") pod \"keystone-db-sync-b4tnp\" (UID: \"2dbb3128-e4f7-4ff1-bc59-7873deed9a52\") " pod="openstack/keystone-db-sync-b4tnp" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.606526 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76fxs\" (UniqueName: \"kubernetes.io/projected/9d103981-92d4-4a79-a8e7-cf9f82c8135a-kube-api-access-76fxs\") pod \"barbican-9c10-account-create-h7m7w\" (UID: \"9d103981-92d4-4a79-a8e7-cf9f82c8135a\") " pod="openstack/barbican-9c10-account-create-h7m7w" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.606602 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d103981-92d4-4a79-a8e7-cf9f82c8135a-operator-scripts\") pod \"barbican-9c10-account-create-h7m7w\" (UID: \"9d103981-92d4-4a79-a8e7-cf9f82c8135a\") " pod="openstack/barbican-9c10-account-create-h7m7w" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.606677 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f3ad43f7-b71c-4cd6-ad76-93f881fe820d-operator-scripts\") pod \"neutron-db-create-8hph9\" (UID: \"f3ad43f7-b71c-4cd6-ad76-93f881fe820d\") " pod="openstack/neutron-db-create-8hph9" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.606714 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2mct\" (UniqueName: \"kubernetes.io/projected/2dbb3128-e4f7-4ff1-bc59-7873deed9a52-kube-api-access-t2mct\") pod \"keystone-db-sync-b4tnp\" (UID: \"2dbb3128-e4f7-4ff1-bc59-7873deed9a52\") " pod="openstack/keystone-db-sync-b4tnp" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.606766 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dbb3128-e4f7-4ff1-bc59-7873deed9a52-combined-ca-bundle\") pod \"keystone-db-sync-b4tnp\" (UID: \"2dbb3128-e4f7-4ff1-bc59-7873deed9a52\") " pod="openstack/keystone-db-sync-b4tnp" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.652936 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-8c2b-account-create-dcfff" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.656552 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-8hph9"] Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.707924 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dbb3128-e4f7-4ff1-bc59-7873deed9a52-combined-ca-bundle\") pod \"keystone-db-sync-b4tnp\" (UID: \"2dbb3128-e4f7-4ff1-bc59-7873deed9a52\") " pod="openstack/keystone-db-sync-b4tnp" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.707958 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxmf9\" (UniqueName: \"kubernetes.io/projected/f3ad43f7-b71c-4cd6-ad76-93f881fe820d-kube-api-access-fxmf9\") pod \"neutron-db-create-8hph9\" (UID: \"f3ad43f7-b71c-4cd6-ad76-93f881fe820d\") " pod="openstack/neutron-db-create-8hph9" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.707984 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2dbb3128-e4f7-4ff1-bc59-7873deed9a52-config-data\") pod \"keystone-db-sync-b4tnp\" (UID: \"2dbb3128-e4f7-4ff1-bc59-7873deed9a52\") " pod="openstack/keystone-db-sync-b4tnp" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.708019 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76fxs\" (UniqueName: \"kubernetes.io/projected/9d103981-92d4-4a79-a8e7-cf9f82c8135a-kube-api-access-76fxs\") pod \"barbican-9c10-account-create-h7m7w\" (UID: \"9d103981-92d4-4a79-a8e7-cf9f82c8135a\") " pod="openstack/barbican-9c10-account-create-h7m7w" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.708066 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d103981-92d4-4a79-a8e7-cf9f82c8135a-operator-scripts\") pod \"barbican-9c10-account-create-h7m7w\" (UID: \"9d103981-92d4-4a79-a8e7-cf9f82c8135a\") " pod="openstack/barbican-9c10-account-create-h7m7w" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.708120 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f3ad43f7-b71c-4cd6-ad76-93f881fe820d-operator-scripts\") pod \"neutron-db-create-8hph9\" (UID: \"f3ad43f7-b71c-4cd6-ad76-93f881fe820d\") " pod="openstack/neutron-db-create-8hph9" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.708147 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2mct\" (UniqueName: \"kubernetes.io/projected/2dbb3128-e4f7-4ff1-bc59-7873deed9a52-kube-api-access-t2mct\") pod \"keystone-db-sync-b4tnp\" (UID: \"2dbb3128-e4f7-4ff1-bc59-7873deed9a52\") " pod="openstack/keystone-db-sync-b4tnp" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.713471 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d103981-92d4-4a79-a8e7-cf9f82c8135a-operator-scripts\") pod \"barbican-9c10-account-create-h7m7w\" (UID: \"9d103981-92d4-4a79-a8e7-cf9f82c8135a\") " pod="openstack/barbican-9c10-account-create-h7m7w" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.714327 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/f3ad43f7-b71c-4cd6-ad76-93f881fe820d-operator-scripts\") pod \"neutron-db-create-8hph9\" (UID: \"f3ad43f7-b71c-4cd6-ad76-93f881fe820d\") " pod="openstack/neutron-db-create-8hph9" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.738659 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxmf9\" (UniqueName: \"kubernetes.io/projected/f3ad43f7-b71c-4cd6-ad76-93f881fe820d-kube-api-access-fxmf9\") pod \"neutron-db-create-8hph9\" (UID: \"f3ad43f7-b71c-4cd6-ad76-93f881fe820d\") " pod="openstack/neutron-db-create-8hph9" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.744624 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2mct\" (UniqueName: \"kubernetes.io/projected/2dbb3128-e4f7-4ff1-bc59-7873deed9a52-kube-api-access-t2mct\") pod \"keystone-db-sync-b4tnp\" (UID: \"2dbb3128-e4f7-4ff1-bc59-7873deed9a52\") " pod="openstack/keystone-db-sync-b4tnp" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.746663 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76fxs\" (UniqueName: \"kubernetes.io/projected/9d103981-92d4-4a79-a8e7-cf9f82c8135a-kube-api-access-76fxs\") pod \"barbican-9c10-account-create-h7m7w\" (UID: \"9d103981-92d4-4a79-a8e7-cf9f82c8135a\") " pod="openstack/barbican-9c10-account-create-h7m7w" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.763697 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-8a8a-account-create-w645s"] Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.766486 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8a8a-account-create-w645s" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.768715 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.778471 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2dbb3128-e4f7-4ff1-bc59-7873deed9a52-config-data\") pod \"keystone-db-sync-b4tnp\" (UID: \"2dbb3128-e4f7-4ff1-bc59-7873deed9a52\") " pod="openstack/keystone-db-sync-b4tnp" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.779228 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dbb3128-e4f7-4ff1-bc59-7873deed9a52-combined-ca-bundle\") pod \"keystone-db-sync-b4tnp\" (UID: \"2dbb3128-e4f7-4ff1-bc59-7873deed9a52\") " pod="openstack/keystone-db-sync-b4tnp" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.810481 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65p59\" (UniqueName: \"kubernetes.io/projected/91ea1a14-92d9-4a15-9cb7-accdb57351b0-kube-api-access-65p59\") pod \"neutron-8a8a-account-create-w645s\" (UID: \"91ea1a14-92d9-4a15-9cb7-accdb57351b0\") " pod="openstack/neutron-8a8a-account-create-w645s" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.810983 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/91ea1a14-92d9-4a15-9cb7-accdb57351b0-operator-scripts\") pod \"neutron-8a8a-account-create-w645s\" (UID: \"91ea1a14-92d9-4a15-9cb7-accdb57351b0\") " pod="openstack/neutron-8a8a-account-create-w645s" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.819988 5039 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8a8a-account-create-w645s"] Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.919892 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65p59\" (UniqueName: \"kubernetes.io/projected/91ea1a14-92d9-4a15-9cb7-accdb57351b0-kube-api-access-65p59\") pod \"neutron-8a8a-account-create-w645s\" (UID: \"91ea1a14-92d9-4a15-9cb7-accdb57351b0\") " pod="openstack/neutron-8a8a-account-create-w645s" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.919989 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/91ea1a14-92d9-4a15-9cb7-accdb57351b0-operator-scripts\") pod \"neutron-8a8a-account-create-w645s\" (UID: \"91ea1a14-92d9-4a15-9cb7-accdb57351b0\") " pod="openstack/neutron-8a8a-account-create-w645s" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.921200 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/91ea1a14-92d9-4a15-9cb7-accdb57351b0-operator-scripts\") pod \"neutron-8a8a-account-create-w645s\" (UID: \"91ea1a14-92d9-4a15-9cb7-accdb57351b0\") " pod="openstack/neutron-8a8a-account-create-w645s" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.936118 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-5t2p6"] Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.937752 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.941488 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.956955 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-5t2p6"] Nov 24 13:39:28 crc kubenswrapper[5039]: I1124 13:39:28.978723 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65p59\" (UniqueName: \"kubernetes.io/projected/91ea1a14-92d9-4a15-9cb7-accdb57351b0-kube-api-access-65p59\") pod \"neutron-8a8a-account-create-w645s\" (UID: \"91ea1a14-92d9-4a15-9cb7-accdb57351b0\") " pod="openstack/neutron-8a8a-account-create-w645s" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.021801 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwg59\" (UniqueName: \"kubernetes.io/projected/938759c5-f8df-4087-a815-e6346ce7de38-kube-api-access-vwg59\") pod \"dnsmasq-dns-77585f5f8c-5t2p6\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.021874 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-config\") pod \"dnsmasq-dns-77585f5f8c-5t2p6\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.021912 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-5t2p6\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") 
" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.021964 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-5t2p6\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.022019 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-5t2p6\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.022052 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-5t2p6\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.062129 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-b4tnp" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.080886 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-9c10-account-create-h7m7w" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.097999 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8a8a-account-create-w645s" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.123335 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-5t2p6\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.123459 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-5t2p6\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.123559 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-5t2p6\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.123613 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-5t2p6\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.123701 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-vwg59\" (UniqueName: \"kubernetes.io/projected/938759c5-f8df-4087-a815-e6346ce7de38-kube-api-access-vwg59\") pod \"dnsmasq-dns-77585f5f8c-5t2p6\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.123769 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-config\") pod \"dnsmasq-dns-77585f5f8c-5t2p6\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.124568 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-5t2p6\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.125092 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-5t2p6\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.125609 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-mdrbd"] Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.126368 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-8hph9" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.126816 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-mdrbd" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.130072 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-5t2p6\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.136466 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-mdrbd"] Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.144947 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-5t2p6\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.160140 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-config\") pod \"dnsmasq-dns-77585f5f8c-5t2p6\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.209708 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vwg59\" (UniqueName: \"kubernetes.io/projected/938759c5-f8df-4087-a815-e6346ce7de38-kube-api-access-vwg59\") pod \"dnsmasq-dns-77585f5f8c-5t2p6\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.226718 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f09296a1-0a30-4fb4-ba9f-c4744066800b-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-mdrbd\" (UID: \"f09296a1-0a30-4fb4-ba9f-c4744066800b\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-mdrbd" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.226903 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xnf9\" (UniqueName: \"kubernetes.io/projected/f09296a1-0a30-4fb4-ba9f-c4744066800b-kube-api-access-5xnf9\") pod \"mysqld-exporter-openstack-cell1-db-create-mdrbd\" (UID: \"f09296a1-0a30-4fb4-ba9f-c4744066800b\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-mdrbd" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.233252 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-bdknn"] Nov 24 13:39:29 crc kubenswrapper[5039]: W1124 13:39:29.306900 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc56ef76f_5741_4430_8973_4c035fc82525.slice/crio-4fdc8cbcf19def172cb1ffb7d3572f841937b7ba2547d62003420c0a1ffe27b9 WatchSource:0}: Error finding container 4fdc8cbcf19def172cb1ffb7d3572f841937b7ba2547d62003420c0a1ffe27b9: Status 404 returned error can't find the container with id 4fdc8cbcf19def172cb1ffb7d3572f841937b7ba2547d62003420c0a1ffe27b9 Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.328298 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-5xnf9\" (UniqueName: \"kubernetes.io/projected/f09296a1-0a30-4fb4-ba9f-c4744066800b-kube-api-access-5xnf9\") pod \"mysqld-exporter-openstack-cell1-db-create-mdrbd\" (UID: \"f09296a1-0a30-4fb4-ba9f-c4744066800b\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-mdrbd" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.328462 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f09296a1-0a30-4fb4-ba9f-c4744066800b-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-mdrbd\" (UID: \"f09296a1-0a30-4fb4-ba9f-c4744066800b\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-mdrbd" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.329136 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f09296a1-0a30-4fb4-ba9f-c4744066800b-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-mdrbd\" (UID: \"f09296a1-0a30-4fb4-ba9f-c4744066800b\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-mdrbd" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.344982 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-35c8-account-create-5lwlj"] Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.354010 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-35c8-account-create-5lwlj" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.356593 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-cell1-db-secret" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.397882 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-35c8-account-create-5lwlj"] Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.403031 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xnf9\" (UniqueName: \"kubernetes.io/projected/f09296a1-0a30-4fb4-ba9f-c4744066800b-kube-api-access-5xnf9\") pod \"mysqld-exporter-openstack-cell1-db-create-mdrbd\" (UID: \"f09296a1-0a30-4fb4-ba9f-c4744066800b\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-mdrbd" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.423833 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-d1ff-account-create-xsvvf"] Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.429963 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/40c8f747-a116-404e-af9e-85f85a759bed-operator-scripts\") pod \"mysqld-exporter-35c8-account-create-5lwlj\" (UID: \"40c8f747-a116-404e-af9e-85f85a759bed\") " pod="openstack/mysqld-exporter-35c8-account-create-5lwlj" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.430025 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbzv6\" (UniqueName: \"kubernetes.io/projected/40c8f747-a116-404e-af9e-85f85a759bed-kube-api-access-bbzv6\") pod \"mysqld-exporter-35c8-account-create-5lwlj\" (UID: \"40c8f747-a116-404e-af9e-85f85a759bed\") " pod="openstack/mysqld-exporter-35c8-account-create-5lwlj" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.470153 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:39:29 crc kubenswrapper[5039]: W1124 13:39:29.484112 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb861c52a_7fd3_4027_931a_624b4149e21b.slice/crio-c571ff085799519e95918937bc330dfcd7f328f7bb4a00bfd80b0b4ac889bd19 WatchSource:0}: Error finding container c571ff085799519e95918937bc330dfcd7f328f7bb4a00bfd80b0b4ac889bd19: Status 404 returned error can't find the container with id c571ff085799519e95918937bc330dfcd7f328f7bb4a00bfd80b0b4ac889bd19 Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.484895 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-mdrbd" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.557795 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/40c8f747-a116-404e-af9e-85f85a759bed-operator-scripts\") pod \"mysqld-exporter-35c8-account-create-5lwlj\" (UID: \"40c8f747-a116-404e-af9e-85f85a759bed\") " pod="openstack/mysqld-exporter-35c8-account-create-5lwlj" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.558194 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbzv6\" (UniqueName: \"kubernetes.io/projected/40c8f747-a116-404e-af9e-85f85a759bed-kube-api-access-bbzv6\") pod \"mysqld-exporter-35c8-account-create-5lwlj\" (UID: \"40c8f747-a116-404e-af9e-85f85a759bed\") " pod="openstack/mysqld-exporter-35c8-account-create-5lwlj" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.559254 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/40c8f747-a116-404e-af9e-85f85a759bed-operator-scripts\") pod \"mysqld-exporter-35c8-account-create-5lwlj\" (UID: \"40c8f747-a116-404e-af9e-85f85a759bed\") " pod="openstack/mysqld-exporter-35c8-account-create-5lwlj" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.593413 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbzv6\" (UniqueName: \"kubernetes.io/projected/40c8f747-a116-404e-af9e-85f85a759bed-kube-api-access-bbzv6\") pod \"mysqld-exporter-35c8-account-create-5lwlj\" (UID: \"40c8f747-a116-404e-af9e-85f85a759bed\") " pod="openstack/mysqld-exporter-35c8-account-create-5lwlj" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.633376 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-8c2b-account-create-dcfff"] Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.662718 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-f9m74"] Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.761513 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-35c8-account-create-5lwlj" Nov 24 13:39:29 crc kubenswrapper[5039]: I1124 13:39:29.835188 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-h8pnd"] Nov 24 13:39:30 crc kubenswrapper[5039]: I1124 13:39:30.172020 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-9c10-account-create-h7m7w"] Nov 24 13:39:30 crc kubenswrapper[5039]: I1124 13:39:30.182409 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-b4tnp"] Nov 24 13:39:30 crc kubenswrapper[5039]: I1124 13:39:30.195190 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8a8a-account-create-w645s"] Nov 24 13:39:30 crc kubenswrapper[5039]: I1124 13:39:30.209737 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-8hph9"] Nov 24 13:39:30 crc kubenswrapper[5039]: W1124 13:39:30.225821 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d103981_92d4_4a79_a8e7_cf9f82c8135a.slice/crio-e43442925e2f0794da7e466c9ae2eb79142039750b7ff232fa64a60bbb108e56 WatchSource:0}: Error finding container e43442925e2f0794da7e466c9ae2eb79142039750b7ff232fa64a60bbb108e56: Status 404 returned error can't find the container with id e43442925e2f0794da7e466c9ae2eb79142039750b7ff232fa64a60bbb108e56 Nov 24 13:39:30 crc kubenswrapper[5039]: W1124 13:39:30.235184 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod91ea1a14_92d9_4a15_9cb7_accdb57351b0.slice/crio-da54c5e55f6c4ff8d390a21bb5a44a3e5bb03f4d653370e1cb822c59b05f7bcf WatchSource:0}: Error finding container da54c5e55f6c4ff8d390a21bb5a44a3e5bb03f4d653370e1cb822c59b05f7bcf: Status 404 returned error can't find the container with id da54c5e55f6c4ff8d390a21bb5a44a3e5bb03f4d653370e1cb822c59b05f7bcf Nov 24 13:39:30 crc kubenswrapper[5039]: I1124 13:39:30.360462 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-9c10-account-create-h7m7w" event={"ID":"9d103981-92d4-4a79-a8e7-cf9f82c8135a","Type":"ContainerStarted","Data":"e43442925e2f0794da7e466c9ae2eb79142039750b7ff232fa64a60bbb108e56"} Nov 24 13:39:30 crc kubenswrapper[5039]: I1124 13:39:30.366405 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-b4tnp" event={"ID":"2dbb3128-e4f7-4ff1-bc59-7873deed9a52","Type":"ContainerStarted","Data":"818d6024d44fb9d6e4e75fd4dc93e0c7108139aa0637b19e900772d5f4c97acb"} Nov 24 13:39:30 crc kubenswrapper[5039]: I1124 13:39:30.371733 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-8hph9" event={"ID":"f3ad43f7-b71c-4cd6-ad76-93f881fe820d","Type":"ContainerStarted","Data":"ca15db05fe55ae849bc3d0c882552e2e0b0e4e13e8c742ea9f816bee53842126"} Nov 24 13:39:30 crc kubenswrapper[5039]: I1124 13:39:30.371970 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-mdrbd"] Nov 24 13:39:30 crc kubenswrapper[5039]: I1124 13:39:30.373588 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-8c2b-account-create-dcfff" event={"ID":"d373e37d-246d-4b90-863b-b224a059c4e1","Type":"ContainerStarted","Data":"be63acda7e836149a2e3a4f605e8d1cf4054f503aa7904ed03998799cddc0c9d"} Nov 24 13:39:30 crc kubenswrapper[5039]: I1124 13:39:30.394311 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/dnsmasq-dns-77585f5f8c-5t2p6"] Nov 24 13:39:30 crc kubenswrapper[5039]: I1124 13:39:30.394380 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8a8a-account-create-w645s" event={"ID":"91ea1a14-92d9-4a15-9cb7-accdb57351b0","Type":"ContainerStarted","Data":"da54c5e55f6c4ff8d390a21bb5a44a3e5bb03f4d653370e1cb822c59b05f7bcf"} Nov 24 13:39:30 crc kubenswrapper[5039]: I1124 13:39:30.401843 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-f9m74" event={"ID":"739c6211-e4b4-4386-9d63-c9b680eb9114","Type":"ContainerStarted","Data":"f9bdad9c127b3acb97a2f41fc0682c18c0f04da5766b9fc51b86a06504c9b677"} Nov 24 13:39:30 crc kubenswrapper[5039]: I1124 13:39:30.404371 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-bdknn" event={"ID":"c56ef76f-5741-4430-8973-4c035fc82525","Type":"ContainerStarted","Data":"8950ed724b546f92b7d71867b6ab3f8f57d0fd14baa0fedbe4f0a78589638084"} Nov 24 13:39:30 crc kubenswrapper[5039]: I1124 13:39:30.404418 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-bdknn" event={"ID":"c56ef76f-5741-4430-8973-4c035fc82525","Type":"ContainerStarted","Data":"4fdc8cbcf19def172cb1ffb7d3572f841937b7ba2547d62003420c0a1ffe27b9"} Nov 24 13:39:30 crc kubenswrapper[5039]: W1124 13:39:30.406314 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod938759c5_f8df_4087_a815_e6346ce7de38.slice/crio-9615e4f79753cd8ae1a15cd8e1b7ecff89ba9a4876fc351ba062c5fe74f297f6 WatchSource:0}: Error finding container 9615e4f79753cd8ae1a15cd8e1b7ecff89ba9a4876fc351ba062c5fe74f297f6: Status 404 returned error can't find the container with id 9615e4f79753cd8ae1a15cd8e1b7ecff89ba9a4876fc351ba062c5fe74f297f6 Nov 24 13:39:30 crc kubenswrapper[5039]: I1124 13:39:30.422266 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-h8pnd" event={"ID":"0016dd31-1097-438e-9197-bd3f5c9659d3","Type":"ContainerStarted","Data":"47dbd718e091cb2c624116a7a1689cdf373d020d90b2365c6dc13be2d8c42497"} Nov 24 13:39:30 crc kubenswrapper[5039]: I1124 13:39:30.432327 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-bdknn" podStartSLOduration=3.432311753 podStartE2EDuration="3.432311753s" podCreationTimestamp="2025-11-24 13:39:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:39:30.426991684 +0000 UTC m=+1282.866116184" watchObservedRunningTime="2025-11-24 13:39:30.432311753 +0000 UTC m=+1282.871436253" Nov 24 13:39:30 crc kubenswrapper[5039]: I1124 13:39:30.432480 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-d1ff-account-create-xsvvf" event={"ID":"b861c52a-7fd3-4027-931a-624b4149e21b","Type":"ContainerStarted","Data":"c571ff085799519e95918937bc330dfcd7f328f7bb4a00bfd80b0b4ac889bd19"} Nov 24 13:39:30 crc kubenswrapper[5039]: I1124 13:39:30.464510 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-35c8-account-create-5lwlj"] Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.461959 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-9c10-account-create-h7m7w" event={"ID":"9d103981-92d4-4a79-a8e7-cf9f82c8135a","Type":"ContainerStarted","Data":"b3bd42281f82b7f052bc504a1137f9d97fe06b4bcffada9efd517907c7badbfb"} Nov 24 13:39:31 crc 
kubenswrapper[5039]: I1124 13:39:31.468473 5039 generic.go:334] "Generic (PLEG): container finished" podID="91ea1a14-92d9-4a15-9cb7-accdb57351b0" containerID="0a224cfdf0893ddc4474174e70a8ac02ee1fe608afca2f72c6590de8442bbab5" exitCode=0 Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.468540 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8a8a-account-create-w645s" event={"ID":"91ea1a14-92d9-4a15-9cb7-accdb57351b0","Type":"ContainerDied","Data":"0a224cfdf0893ddc4474174e70a8ac02ee1fe608afca2f72c6590de8442bbab5"} Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.472266 5039 generic.go:334] "Generic (PLEG): container finished" podID="40c8f747-a116-404e-af9e-85f85a759bed" containerID="b0180965e15e5c38177a1db790e1068a187ffef0f2295e39adc2b759b4f864fd" exitCode=0 Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.472315 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-35c8-account-create-5lwlj" event={"ID":"40c8f747-a116-404e-af9e-85f85a759bed","Type":"ContainerDied","Data":"b0180965e15e5c38177a1db790e1068a187ffef0f2295e39adc2b759b4f864fd"} Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.472333 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-35c8-account-create-5lwlj" event={"ID":"40c8f747-a116-404e-af9e-85f85a759bed","Type":"ContainerStarted","Data":"1512f291531e716cd2fdde42fc36422c2312b2745aa561ee450684360a16e192"} Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.474221 5039 generic.go:334] "Generic (PLEG): container finished" podID="d373e37d-246d-4b90-863b-b224a059c4e1" containerID="6757bdea0fa369f6e9dcb78a19d029d0785ce801d1b7126895435b166de0baa7" exitCode=0 Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.474263 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-8c2b-account-create-dcfff" event={"ID":"d373e37d-246d-4b90-863b-b224a059c4e1","Type":"ContainerDied","Data":"6757bdea0fa369f6e9dcb78a19d029d0785ce801d1b7126895435b166de0baa7"} Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.485235 5039 generic.go:334] "Generic (PLEG): container finished" podID="739c6211-e4b4-4386-9d63-c9b680eb9114" containerID="c2f0914e2a0324a4f9c803881825b288e8808496d77055ec30366e976583b6b5" exitCode=0 Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.485323 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-f9m74" event={"ID":"739c6211-e4b4-4386-9d63-c9b680eb9114","Type":"ContainerDied","Data":"c2f0914e2a0324a4f9c803881825b288e8808496d77055ec30366e976583b6b5"} Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.487144 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-9c10-account-create-h7m7w" podStartSLOduration=3.487127052 podStartE2EDuration="3.487127052s" podCreationTimestamp="2025-11-24 13:39:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:39:31.474421572 +0000 UTC m=+1283.913546072" watchObservedRunningTime="2025-11-24 13:39:31.487127052 +0000 UTC m=+1283.926251552" Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.489736 5039 generic.go:334] "Generic (PLEG): container finished" podID="f09296a1-0a30-4fb4-ba9f-c4744066800b" containerID="e9aaa5251be8182c8baa2279431d7aa214fd17781ac0bcd3b738343f9f2bfa84" exitCode=0 Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.489815 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/mysqld-exporter-openstack-cell1-db-create-mdrbd" event={"ID":"f09296a1-0a30-4fb4-ba9f-c4744066800b","Type":"ContainerDied","Data":"e9aaa5251be8182c8baa2279431d7aa214fd17781ac0bcd3b738343f9f2bfa84"} Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.489839 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-mdrbd" event={"ID":"f09296a1-0a30-4fb4-ba9f-c4744066800b","Type":"ContainerStarted","Data":"c9c7991bd56780ba2a042607f99d645448091efcc7710d938966fe6773753f63"} Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.494914 5039 generic.go:334] "Generic (PLEG): container finished" podID="c56ef76f-5741-4430-8973-4c035fc82525" containerID="8950ed724b546f92b7d71867b6ab3f8f57d0fd14baa0fedbe4f0a78589638084" exitCode=0 Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.495008 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-bdknn" event={"ID":"c56ef76f-5741-4430-8973-4c035fc82525","Type":"ContainerDied","Data":"8950ed724b546f92b7d71867b6ab3f8f57d0fd14baa0fedbe4f0a78589638084"} Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.496929 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-h8pnd" event={"ID":"0016dd31-1097-438e-9197-bd3f5c9659d3","Type":"ContainerStarted","Data":"d94b324e68efb784416ce91f44a095ea45e3ffb2eae1cd0ad9c58fe73d2887b3"} Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.498304 5039 generic.go:334] "Generic (PLEG): container finished" podID="f3ad43f7-b71c-4cd6-ad76-93f881fe820d" containerID="7c65004452f1bcc11272cc1df757a0ece2c12a06cee1351b5caaad4a4e22b0de" exitCode=0 Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.498353 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-8hph9" event={"ID":"f3ad43f7-b71c-4cd6-ad76-93f881fe820d","Type":"ContainerDied","Data":"7c65004452f1bcc11272cc1df757a0ece2c12a06cee1351b5caaad4a4e22b0de"} Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.499995 5039 generic.go:334] "Generic (PLEG): container finished" podID="b861c52a-7fd3-4027-931a-624b4149e21b" containerID="0f123540ba7e5b3239817353478fbbac74f3b88ae4c9c89f0e6cb242756aa778" exitCode=0 Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.500050 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-d1ff-account-create-xsvvf" event={"ID":"b861c52a-7fd3-4027-931a-624b4149e21b","Type":"ContainerDied","Data":"0f123540ba7e5b3239817353478fbbac74f3b88ae4c9c89f0e6cb242756aa778"} Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.502894 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"54819035-007f-4162-9419-d825f50e1ce9","Type":"ContainerStarted","Data":"1b312f134f89d05e7839efb5def50b5a1a1cd6499ce0c7463186baf1f396963f"} Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.513316 5039 generic.go:334] "Generic (PLEG): container finished" podID="938759c5-f8df-4087-a815-e6346ce7de38" containerID="d1633796936d0d4a5f1281754580cc7980ef3250d6fe9c3557d65c8db10051ce" exitCode=0 Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.513370 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" event={"ID":"938759c5-f8df-4087-a815-e6346ce7de38","Type":"ContainerDied","Data":"d1633796936d0d4a5f1281754580cc7980ef3250d6fe9c3557d65c8db10051ce"} Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.513411 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" event={"ID":"938759c5-f8df-4087-a815-e6346ce7de38","Type":"ContainerStarted","Data":"9615e4f79753cd8ae1a15cd8e1b7ecff89ba9a4876fc351ba062c5fe74f297f6"} Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.763079 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-vvjsm"] Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.764664 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-vvjsm" Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.766558 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.769814 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-2gxh2" Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.777316 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-vvjsm"] Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.923673 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c22dae7-e545-4eb0-9552-f3c691f397df-combined-ca-bundle\") pod \"glance-db-sync-vvjsm\" (UID: \"2c22dae7-e545-4eb0-9552-f3c691f397df\") " pod="openstack/glance-db-sync-vvjsm" Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.923751 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f27nh\" (UniqueName: \"kubernetes.io/projected/2c22dae7-e545-4eb0-9552-f3c691f397df-kube-api-access-f27nh\") pod \"glance-db-sync-vvjsm\" (UID: \"2c22dae7-e545-4eb0-9552-f3c691f397df\") " pod="openstack/glance-db-sync-vvjsm" Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.923779 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c22dae7-e545-4eb0-9552-f3c691f397df-config-data\") pod \"glance-db-sync-vvjsm\" (UID: \"2c22dae7-e545-4eb0-9552-f3c691f397df\") " pod="openstack/glance-db-sync-vvjsm" Nov 24 13:39:31 crc kubenswrapper[5039]: I1124 13:39:31.923801 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2c22dae7-e545-4eb0-9552-f3c691f397df-db-sync-config-data\") pod \"glance-db-sync-vvjsm\" (UID: \"2c22dae7-e545-4eb0-9552-f3c691f397df\") " pod="openstack/glance-db-sync-vvjsm" Nov 24 13:39:32 crc kubenswrapper[5039]: I1124 13:39:32.025763 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c22dae7-e545-4eb0-9552-f3c691f397df-combined-ca-bundle\") pod \"glance-db-sync-vvjsm\" (UID: \"2c22dae7-e545-4eb0-9552-f3c691f397df\") " pod="openstack/glance-db-sync-vvjsm" Nov 24 13:39:32 crc kubenswrapper[5039]: I1124 13:39:32.025983 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f27nh\" (UniqueName: \"kubernetes.io/projected/2c22dae7-e545-4eb0-9552-f3c691f397df-kube-api-access-f27nh\") pod \"glance-db-sync-vvjsm\" (UID: \"2c22dae7-e545-4eb0-9552-f3c691f397df\") " pod="openstack/glance-db-sync-vvjsm" Nov 24 13:39:32 crc kubenswrapper[5039]: I1124 13:39:32.026100 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/2c22dae7-e545-4eb0-9552-f3c691f397df-config-data\") pod \"glance-db-sync-vvjsm\" (UID: \"2c22dae7-e545-4eb0-9552-f3c691f397df\") " pod="openstack/glance-db-sync-vvjsm" Nov 24 13:39:32 crc kubenswrapper[5039]: I1124 13:39:32.026140 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2c22dae7-e545-4eb0-9552-f3c691f397df-db-sync-config-data\") pod \"glance-db-sync-vvjsm\" (UID: \"2c22dae7-e545-4eb0-9552-f3c691f397df\") " pod="openstack/glance-db-sync-vvjsm" Nov 24 13:39:32 crc kubenswrapper[5039]: I1124 13:39:32.031187 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c22dae7-e545-4eb0-9552-f3c691f397df-config-data\") pod \"glance-db-sync-vvjsm\" (UID: \"2c22dae7-e545-4eb0-9552-f3c691f397df\") " pod="openstack/glance-db-sync-vvjsm" Nov 24 13:39:32 crc kubenswrapper[5039]: I1124 13:39:32.031188 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c22dae7-e545-4eb0-9552-f3c691f397df-combined-ca-bundle\") pod \"glance-db-sync-vvjsm\" (UID: \"2c22dae7-e545-4eb0-9552-f3c691f397df\") " pod="openstack/glance-db-sync-vvjsm" Nov 24 13:39:32 crc kubenswrapper[5039]: I1124 13:39:32.039634 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2c22dae7-e545-4eb0-9552-f3c691f397df-db-sync-config-data\") pod \"glance-db-sync-vvjsm\" (UID: \"2c22dae7-e545-4eb0-9552-f3c691f397df\") " pod="openstack/glance-db-sync-vvjsm" Nov 24 13:39:32 crc kubenswrapper[5039]: I1124 13:39:32.056256 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f27nh\" (UniqueName: \"kubernetes.io/projected/2c22dae7-e545-4eb0-9552-f3c691f397df-kube-api-access-f27nh\") pod \"glance-db-sync-vvjsm\" (UID: \"2c22dae7-e545-4eb0-9552-f3c691f397df\") " pod="openstack/glance-db-sync-vvjsm" Nov 24 13:39:32 crc kubenswrapper[5039]: I1124 13:39:32.196341 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-vvjsm"
Nov 24 13:39:32 crc kubenswrapper[5039]: I1124 13:39:32.524094 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" event={"ID":"938759c5-f8df-4087-a815-e6346ce7de38","Type":"ContainerStarted","Data":"08a5e493313c144245266dfd6658b8d5cd3a02f6f76b837738c7a8ea96006770"}
Nov 24 13:39:32 crc kubenswrapper[5039]: I1124 13:39:32.524675 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6"
Nov 24 13:39:32 crc kubenswrapper[5039]: I1124 13:39:32.525722 5039 generic.go:334] "Generic (PLEG): container finished" podID="0016dd31-1097-438e-9197-bd3f5c9659d3" containerID="d94b324e68efb784416ce91f44a095ea45e3ffb2eae1cd0ad9c58fe73d2887b3" exitCode=0
Nov 24 13:39:32 crc kubenswrapper[5039]: I1124 13:39:32.525786 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-h8pnd" event={"ID":"0016dd31-1097-438e-9197-bd3f5c9659d3","Type":"ContainerDied","Data":"d94b324e68efb784416ce91f44a095ea45e3ffb2eae1cd0ad9c58fe73d2887b3"}
Nov 24 13:39:32 crc kubenswrapper[5039]: I1124 13:39:32.527706 5039 generic.go:334] "Generic (PLEG): container finished" podID="9d103981-92d4-4a79-a8e7-cf9f82c8135a" containerID="b3bd42281f82b7f052bc504a1137f9d97fe06b4bcffada9efd517907c7badbfb" exitCode=0
Nov 24 13:39:32 crc kubenswrapper[5039]: I1124 13:39:32.527785 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-9c10-account-create-h7m7w" event={"ID":"9d103981-92d4-4a79-a8e7-cf9f82c8135a","Type":"ContainerDied","Data":"b3bd42281f82b7f052bc504a1137f9d97fe06b4bcffada9efd517907c7badbfb"}
Nov 24 13:39:32 crc kubenswrapper[5039]: I1124 13:39:32.567472 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" podStartSLOduration=4.567446243 podStartE2EDuration="4.567446243s" podCreationTimestamp="2025-11-24 13:39:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:39:32.545892747 +0000 UTC m=+1284.985017257" watchObservedRunningTime="2025-11-24 13:39:32.567446243 +0000 UTC m=+1285.006570743"
Nov 24 13:39:32 crc kubenswrapper[5039]: I1124 13:39:32.865085 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-mdrbd"
Nov 24 13:39:32 crc kubenswrapper[5039]: I1124 13:39:32.931190 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-vvjsm"]
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.025277 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-d1ff-account-create-xsvvf"
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.034052 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-8c2b-account-create-dcfff"
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.071242 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnplt\" (UniqueName: \"kubernetes.io/projected/d373e37d-246d-4b90-863b-b224a059c4e1-kube-api-access-hnplt\") pod \"d373e37d-246d-4b90-863b-b224a059c4e1\" (UID: \"d373e37d-246d-4b90-863b-b224a059c4e1\") "
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.071311 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b861c52a-7fd3-4027-931a-624b4149e21b-operator-scripts\") pod \"b861c52a-7fd3-4027-931a-624b4149e21b\" (UID: \"b861c52a-7fd3-4027-931a-624b4149e21b\") "
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.071395 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k7672\" (UniqueName: \"kubernetes.io/projected/b861c52a-7fd3-4027-931a-624b4149e21b-kube-api-access-k7672\") pod \"b861c52a-7fd3-4027-931a-624b4149e21b\" (UID: \"b861c52a-7fd3-4027-931a-624b4149e21b\") "
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.071423 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f09296a1-0a30-4fb4-ba9f-c4744066800b-operator-scripts\") pod \"f09296a1-0a30-4fb4-ba9f-c4744066800b\" (UID: \"f09296a1-0a30-4fb4-ba9f-c4744066800b\") "
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.071462 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5xnf9\" (UniqueName: \"kubernetes.io/projected/f09296a1-0a30-4fb4-ba9f-c4744066800b-kube-api-access-5xnf9\") pod \"f09296a1-0a30-4fb4-ba9f-c4744066800b\" (UID: \"f09296a1-0a30-4fb4-ba9f-c4744066800b\") "
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.071486 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d373e37d-246d-4b90-863b-b224a059c4e1-operator-scripts\") pod \"d373e37d-246d-4b90-863b-b224a059c4e1\" (UID: \"d373e37d-246d-4b90-863b-b224a059c4e1\") "
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.072457 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f09296a1-0a30-4fb4-ba9f-c4744066800b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f09296a1-0a30-4fb4-ba9f-c4744066800b" (UID: "f09296a1-0a30-4fb4-ba9f-c4744066800b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.072548 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d373e37d-246d-4b90-863b-b224a059c4e1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d373e37d-246d-4b90-863b-b224a059c4e1" (UID: "d373e37d-246d-4b90-863b-b224a059c4e1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.072621 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b861c52a-7fd3-4027-931a-624b4149e21b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b861c52a-7fd3-4027-931a-624b4149e21b" (UID: "b861c52a-7fd3-4027-931a-624b4149e21b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.078368 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b861c52a-7fd3-4027-931a-624b4149e21b-kube-api-access-k7672" (OuterVolumeSpecName: "kube-api-access-k7672") pod "b861c52a-7fd3-4027-931a-624b4149e21b" (UID: "b861c52a-7fd3-4027-931a-624b4149e21b"). InnerVolumeSpecName "kube-api-access-k7672". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.079435 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f09296a1-0a30-4fb4-ba9f-c4744066800b-kube-api-access-5xnf9" (OuterVolumeSpecName: "kube-api-access-5xnf9") pod "f09296a1-0a30-4fb4-ba9f-c4744066800b" (UID: "f09296a1-0a30-4fb4-ba9f-c4744066800b"). InnerVolumeSpecName "kube-api-access-5xnf9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.086434 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d373e37d-246d-4b90-863b-b224a059c4e1-kube-api-access-hnplt" (OuterVolumeSpecName: "kube-api-access-hnplt") pod "d373e37d-246d-4b90-863b-b224a059c4e1" (UID: "d373e37d-246d-4b90-863b-b224a059c4e1"). InnerVolumeSpecName "kube-api-access-hnplt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.154528 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-f9m74"
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.174896 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/739c6211-e4b4-4386-9d63-c9b680eb9114-operator-scripts\") pod \"739c6211-e4b4-4386-9d63-c9b680eb9114\" (UID: \"739c6211-e4b4-4386-9d63-c9b680eb9114\") "
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.175069 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4mzhc\" (UniqueName: \"kubernetes.io/projected/739c6211-e4b4-4386-9d63-c9b680eb9114-kube-api-access-4mzhc\") pod \"739c6211-e4b4-4386-9d63-c9b680eb9114\" (UID: \"739c6211-e4b4-4386-9d63-c9b680eb9114\") "
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.175599 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f09296a1-0a30-4fb4-ba9f-c4744066800b-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.175618 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5xnf9\" (UniqueName: \"kubernetes.io/projected/f09296a1-0a30-4fb4-ba9f-c4744066800b-kube-api-access-5xnf9\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.175629 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d373e37d-246d-4b90-863b-b224a059c4e1-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.175637 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnplt\" (UniqueName: \"kubernetes.io/projected/d373e37d-246d-4b90-863b-b224a059c4e1-kube-api-access-hnplt\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.175645 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b861c52a-7fd3-4027-931a-624b4149e21b-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.175656 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k7672\" (UniqueName: \"kubernetes.io/projected/b861c52a-7fd3-4027-931a-624b4149e21b-kube-api-access-k7672\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.176693 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/739c6211-e4b4-4386-9d63-c9b680eb9114-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "739c6211-e4b4-4386-9d63-c9b680eb9114" (UID: "739c6211-e4b4-4386-9d63-c9b680eb9114"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.180739 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/739c6211-e4b4-4386-9d63-c9b680eb9114-kube-api-access-4mzhc" (OuterVolumeSpecName: "kube-api-access-4mzhc") pod "739c6211-e4b4-4386-9d63-c9b680eb9114" (UID: "739c6211-e4b4-4386-9d63-c9b680eb9114"). InnerVolumeSpecName "kube-api-access-4mzhc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.284175 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4mzhc\" (UniqueName: \"kubernetes.io/projected/739c6211-e4b4-4386-9d63-c9b680eb9114-kube-api-access-4mzhc\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.284547 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/739c6211-e4b4-4386-9d63-c9b680eb9114-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.538188 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-8c2b-account-create-dcfff"
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.538195 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-8c2b-account-create-dcfff" event={"ID":"d373e37d-246d-4b90-863b-b224a059c4e1","Type":"ContainerDied","Data":"be63acda7e836149a2e3a4f605e8d1cf4054f503aa7904ed03998799cddc0c9d"}
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.538288 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="be63acda7e836149a2e3a4f605e8d1cf4054f503aa7904ed03998799cddc0c9d"
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.561697 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-f9m74"
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.561736 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-f9m74" event={"ID":"739c6211-e4b4-4386-9d63-c9b680eb9114","Type":"ContainerDied","Data":"f9bdad9c127b3acb97a2f41fc0682c18c0f04da5766b9fc51b86a06504c9b677"}
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.561773 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f9bdad9c127b3acb97a2f41fc0682c18c0f04da5766b9fc51b86a06504c9b677"
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.563849 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-mdrbd" event={"ID":"f09296a1-0a30-4fb4-ba9f-c4744066800b","Type":"ContainerDied","Data":"c9c7991bd56780ba2a042607f99d645448091efcc7710d938966fe6773753f63"}
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.563894 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9c7991bd56780ba2a042607f99d645448091efcc7710d938966fe6773753f63"
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.563960 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-mdrbd"
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.565661 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-vvjsm" event={"ID":"2c22dae7-e545-4eb0-9552-f3c691f397df","Type":"ContainerStarted","Data":"1311f3284b52fb7222b5ee544dc846e7a011ea146fc47fc317796be6e6191208"}
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.570581 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-d1ff-account-create-xsvvf" event={"ID":"b861c52a-7fd3-4027-931a-624b4149e21b","Type":"ContainerDied","Data":"c571ff085799519e95918937bc330dfcd7f328f7bb4a00bfd80b0b4ac889bd19"}
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.570636 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c571ff085799519e95918937bc330dfcd7f328f7bb4a00bfd80b0b4ac889bd19"
Nov 24 13:39:33 crc kubenswrapper[5039]: I1124 13:39:33.570860 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-d1ff-account-create-xsvvf"
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.026581 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-h8pnd"
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.033605 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-bdknn"
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.044274 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8a8a-account-create-w645s"
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.049186 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9542p\" (UniqueName: \"kubernetes.io/projected/0016dd31-1097-438e-9197-bd3f5c9659d3-kube-api-access-9542p\") pod \"0016dd31-1097-438e-9197-bd3f5c9659d3\" (UID: \"0016dd31-1097-438e-9197-bd3f5c9659d3\") "
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.049269 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0016dd31-1097-438e-9197-bd3f5c9659d3-operator-scripts\") pod \"0016dd31-1097-438e-9197-bd3f5c9659d3\" (UID: \"0016dd31-1097-438e-9197-bd3f5c9659d3\") "
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.049925 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0016dd31-1097-438e-9197-bd3f5c9659d3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0016dd31-1097-438e-9197-bd3f5c9659d3" (UID: "0016dd31-1097-438e-9197-bd3f5c9659d3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.055705 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0016dd31-1097-438e-9197-bd3f5c9659d3-kube-api-access-9542p" (OuterVolumeSpecName: "kube-api-access-9542p") pod "0016dd31-1097-438e-9197-bd3f5c9659d3" (UID: "0016dd31-1097-438e-9197-bd3f5c9659d3"). InnerVolumeSpecName "kube-api-access-9542p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.115961 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-8hph9"
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.124666 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-9c10-account-create-h7m7w"
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.134666 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-35c8-account-create-5lwlj"
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.150355 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d103981-92d4-4a79-a8e7-cf9f82c8135a-operator-scripts\") pod \"9d103981-92d4-4a79-a8e7-cf9f82c8135a\" (UID: \"9d103981-92d4-4a79-a8e7-cf9f82c8135a\") "
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.150415 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c56ef76f-5741-4430-8973-4c035fc82525-operator-scripts\") pod \"c56ef76f-5741-4430-8973-4c035fc82525\" (UID: \"c56ef76f-5741-4430-8973-4c035fc82525\") "
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.150485 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/91ea1a14-92d9-4a15-9cb7-accdb57351b0-operator-scripts\") pod \"91ea1a14-92d9-4a15-9cb7-accdb57351b0\" (UID: \"91ea1a14-92d9-4a15-9cb7-accdb57351b0\") "
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.150538 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-76fxs\" (UniqueName: \"kubernetes.io/projected/9d103981-92d4-4a79-a8e7-cf9f82c8135a-kube-api-access-76fxs\") pod \"9d103981-92d4-4a79-a8e7-cf9f82c8135a\" (UID: \"9d103981-92d4-4a79-a8e7-cf9f82c8135a\") "
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.150631 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7xbs\" (UniqueName: \"kubernetes.io/projected/c56ef76f-5741-4430-8973-4c035fc82525-kube-api-access-q7xbs\") pod \"c56ef76f-5741-4430-8973-4c035fc82525\" (UID: \"c56ef76f-5741-4430-8973-4c035fc82525\") "
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.150729 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-65p59\" (UniqueName: \"kubernetes.io/projected/91ea1a14-92d9-4a15-9cb7-accdb57351b0-kube-api-access-65p59\") pod \"91ea1a14-92d9-4a15-9cb7-accdb57351b0\" (UID: \"91ea1a14-92d9-4a15-9cb7-accdb57351b0\") "
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.150778 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f3ad43f7-b71c-4cd6-ad76-93f881fe820d-operator-scripts\") pod \"f3ad43f7-b71c-4cd6-ad76-93f881fe820d\" (UID: \"f3ad43f7-b71c-4cd6-ad76-93f881fe820d\") "
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.150810 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fxmf9\" (UniqueName: \"kubernetes.io/projected/f3ad43f7-b71c-4cd6-ad76-93f881fe820d-kube-api-access-fxmf9\") pod \"f3ad43f7-b71c-4cd6-ad76-93f881fe820d\" (UID: \"f3ad43f7-b71c-4cd6-ad76-93f881fe820d\") "
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.150883 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d103981-92d4-4a79-a8e7-cf9f82c8135a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9d103981-92d4-4a79-a8e7-cf9f82c8135a" (UID: "9d103981-92d4-4a79-a8e7-cf9f82c8135a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.150909 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c56ef76f-5741-4430-8973-4c035fc82525-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c56ef76f-5741-4430-8973-4c035fc82525" (UID: "c56ef76f-5741-4430-8973-4c035fc82525"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.150943 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91ea1a14-92d9-4a15-9cb7-accdb57351b0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "91ea1a14-92d9-4a15-9cb7-accdb57351b0" (UID: "91ea1a14-92d9-4a15-9cb7-accdb57351b0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.153949 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d103981-92d4-4a79-a8e7-cf9f82c8135a-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.154002 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c56ef76f-5741-4430-8973-4c035fc82525-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.154034 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/91ea1a14-92d9-4a15-9cb7-accdb57351b0-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.154051 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9542p\" (UniqueName: \"kubernetes.io/projected/0016dd31-1097-438e-9197-bd3f5c9659d3-kube-api-access-9542p\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.154068 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0016dd31-1097-438e-9197-bd3f5c9659d3-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.161307 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d103981-92d4-4a79-a8e7-cf9f82c8135a-kube-api-access-76fxs" (OuterVolumeSpecName: "kube-api-access-76fxs") pod "9d103981-92d4-4a79-a8e7-cf9f82c8135a" (UID: "9d103981-92d4-4a79-a8e7-cf9f82c8135a"). InnerVolumeSpecName "kube-api-access-76fxs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.161971 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3ad43f7-b71c-4cd6-ad76-93f881fe820d-kube-api-access-fxmf9" (OuterVolumeSpecName: "kube-api-access-fxmf9") pod "f3ad43f7-b71c-4cd6-ad76-93f881fe820d" (UID: "f3ad43f7-b71c-4cd6-ad76-93f881fe820d"). InnerVolumeSpecName "kube-api-access-fxmf9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.162856 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f3ad43f7-b71c-4cd6-ad76-93f881fe820d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f3ad43f7-b71c-4cd6-ad76-93f881fe820d" (UID: "f3ad43f7-b71c-4cd6-ad76-93f881fe820d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.165523 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c56ef76f-5741-4430-8973-4c035fc82525-kube-api-access-q7xbs" (OuterVolumeSpecName: "kube-api-access-q7xbs") pod "c56ef76f-5741-4430-8973-4c035fc82525" (UID: "c56ef76f-5741-4430-8973-4c035fc82525"). InnerVolumeSpecName "kube-api-access-q7xbs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.178475 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91ea1a14-92d9-4a15-9cb7-accdb57351b0-kube-api-access-65p59" (OuterVolumeSpecName: "kube-api-access-65p59") pod "91ea1a14-92d9-4a15-9cb7-accdb57351b0" (UID: "91ea1a14-92d9-4a15-9cb7-accdb57351b0"). InnerVolumeSpecName "kube-api-access-65p59". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.254858 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/40c8f747-a116-404e-af9e-85f85a759bed-operator-scripts\") pod \"40c8f747-a116-404e-af9e-85f85a759bed\" (UID: \"40c8f747-a116-404e-af9e-85f85a759bed\") "
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.255080 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbzv6\" (UniqueName: \"kubernetes.io/projected/40c8f747-a116-404e-af9e-85f85a759bed-kube-api-access-bbzv6\") pod \"40c8f747-a116-404e-af9e-85f85a759bed\" (UID: \"40c8f747-a116-404e-af9e-85f85a759bed\") "
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.255640 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f3ad43f7-b71c-4cd6-ad76-93f881fe820d-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.255662 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fxmf9\" (UniqueName: \"kubernetes.io/projected/f3ad43f7-b71c-4cd6-ad76-93f881fe820d-kube-api-access-fxmf9\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.255676 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-76fxs\" (UniqueName: \"kubernetes.io/projected/9d103981-92d4-4a79-a8e7-cf9f82c8135a-kube-api-access-76fxs\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.255690 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7xbs\" (UniqueName: \"kubernetes.io/projected/c56ef76f-5741-4430-8973-4c035fc82525-kube-api-access-q7xbs\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.255703 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-65p59\" (UniqueName: \"kubernetes.io/projected/91ea1a14-92d9-4a15-9cb7-accdb57351b0-kube-api-access-65p59\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.256234 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40c8f747-a116-404e-af9e-85f85a759bed-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "40c8f747-a116-404e-af9e-85f85a759bed" (UID: "40c8f747-a116-404e-af9e-85f85a759bed"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.260813 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40c8f747-a116-404e-af9e-85f85a759bed-kube-api-access-bbzv6" (OuterVolumeSpecName: "kube-api-access-bbzv6") pod "40c8f747-a116-404e-af9e-85f85a759bed" (UID: "40c8f747-a116-404e-af9e-85f85a759bed"). InnerVolumeSpecName "kube-api-access-bbzv6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.357271 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/40c8f747-a116-404e-af9e-85f85a759bed-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.357302 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbzv6\" (UniqueName: \"kubernetes.io/projected/40c8f747-a116-404e-af9e-85f85a759bed-kube-api-access-bbzv6\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.604872 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-9c10-account-create-h7m7w" event={"ID":"9d103981-92d4-4a79-a8e7-cf9f82c8135a","Type":"ContainerDied","Data":"e43442925e2f0794da7e466c9ae2eb79142039750b7ff232fa64a60bbb108e56"}
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.604917 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e43442925e2f0794da7e466c9ae2eb79142039750b7ff232fa64a60bbb108e56"
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.604937 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-9c10-account-create-h7m7w"
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.607038 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8a8a-account-create-w645s" event={"ID":"91ea1a14-92d9-4a15-9cb7-accdb57351b0","Type":"ContainerDied","Data":"da54c5e55f6c4ff8d390a21bb5a44a3e5bb03f4d653370e1cb822c59b05f7bcf"}
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.607070 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da54c5e55f6c4ff8d390a21bb5a44a3e5bb03f4d653370e1cb822c59b05f7bcf"
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.607084 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8a8a-account-create-w645s"
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.609058 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-35c8-account-create-5lwlj" event={"ID":"40c8f747-a116-404e-af9e-85f85a759bed","Type":"ContainerDied","Data":"1512f291531e716cd2fdde42fc36422c2312b2745aa561ee450684360a16e192"}
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.609093 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1512f291531e716cd2fdde42fc36422c2312b2745aa561ee450684360a16e192"
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.609110 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-35c8-account-create-5lwlj"
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.610711 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-b4tnp" event={"ID":"2dbb3128-e4f7-4ff1-bc59-7873deed9a52","Type":"ContainerStarted","Data":"6929ed7d7ea24a1330c1fba516d8289152ccdde5a5b8fb164f38c1790497a4e0"}
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.613930 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-bdknn" event={"ID":"c56ef76f-5741-4430-8973-4c035fc82525","Type":"ContainerDied","Data":"4fdc8cbcf19def172cb1ffb7d3572f841937b7ba2547d62003420c0a1ffe27b9"}
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.613961 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4fdc8cbcf19def172cb1ffb7d3572f841937b7ba2547d62003420c0a1ffe27b9"
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.613983 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-bdknn"
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.615729 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-h8pnd" event={"ID":"0016dd31-1097-438e-9197-bd3f5c9659d3","Type":"ContainerDied","Data":"47dbd718e091cb2c624116a7a1689cdf373d020d90b2365c6dc13be2d8c42497"}
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.615763 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="47dbd718e091cb2c624116a7a1689cdf373d020d90b2365c6dc13be2d8c42497"
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.615810 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-h8pnd"
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.620449 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-8hph9"
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.620447 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-8hph9" event={"ID":"f3ad43f7-b71c-4cd6-ad76-93f881fe820d","Type":"ContainerDied","Data":"ca15db05fe55ae849bc3d0c882552e2e0b0e4e13e8c742ea9f816bee53842126"}
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.620537 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca15db05fe55ae849bc3d0c882552e2e0b0e4e13e8c742ea9f816bee53842126"
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.623207 5039 generic.go:334] "Generic (PLEG): container finished" podID="54819035-007f-4162-9419-d825f50e1ce9" containerID="1b312f134f89d05e7839efb5def50b5a1a1cd6499ce0c7463186baf1f396963f" exitCode=0
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.623249 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"54819035-007f-4162-9419-d825f50e1ce9","Type":"ContainerDied","Data":"1b312f134f89d05e7839efb5def50b5a1a1cd6499ce0c7463186baf1f396963f"}
Nov 24 13:39:36 crc kubenswrapper[5039]: I1124 13:39:36.629952 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-b4tnp" podStartSLOduration=2.979195718 podStartE2EDuration="8.629932947s" podCreationTimestamp="2025-11-24 13:39:28 +0000 UTC" firstStartedPulling="2025-11-24 13:39:30.228724832 +0000 UTC m=+1282.667849332" lastFinishedPulling="2025-11-24 13:39:35.879462061 +0000 UTC m=+1288.318586561" observedRunningTime="2025-11-24 13:39:36.626398801 +0000 UTC m=+1289.065523311" watchObservedRunningTime="2025-11-24 13:39:36.629932947 +0000 UTC m=+1289.069057447"
Nov 24 13:39:37 crc kubenswrapper[5039]: I1124 13:39:37.637869 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"54819035-007f-4162-9419-d825f50e1ce9","Type":"ContainerStarted","Data":"0ab78ff41d49129d36c4f157977c93a4bd0bd84345dcc46701f4f2839a2b5333"}
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.413287 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"]
Nov 24 13:39:39 crc kubenswrapper[5039]: E1124 13:39:39.414025 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d373e37d-246d-4b90-863b-b224a059c4e1" containerName="mariadb-account-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.414038 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="d373e37d-246d-4b90-863b-b224a059c4e1" containerName="mariadb-account-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: E1124 13:39:39.414061 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40c8f747-a116-404e-af9e-85f85a759bed" containerName="mariadb-account-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.414067 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="40c8f747-a116-404e-af9e-85f85a759bed" containerName="mariadb-account-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: E1124 13:39:39.414077 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b861c52a-7fd3-4027-931a-624b4149e21b" containerName="mariadb-account-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.414083 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b861c52a-7fd3-4027-931a-624b4149e21b" containerName="mariadb-account-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: E1124 13:39:39.414091 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0016dd31-1097-438e-9197-bd3f5c9659d3" containerName="mariadb-database-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.414097 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0016dd31-1097-438e-9197-bd3f5c9659d3" containerName="mariadb-database-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: E1124 13:39:39.414106 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91ea1a14-92d9-4a15-9cb7-accdb57351b0" containerName="mariadb-account-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.414113 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="91ea1a14-92d9-4a15-9cb7-accdb57351b0" containerName="mariadb-account-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: E1124 13:39:39.414129 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d103981-92d4-4a79-a8e7-cf9f82c8135a" containerName="mariadb-account-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.414135 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d103981-92d4-4a79-a8e7-cf9f82c8135a" containerName="mariadb-account-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: E1124 13:39:39.414149 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="739c6211-e4b4-4386-9d63-c9b680eb9114" containerName="mariadb-database-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.414155 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="739c6211-e4b4-4386-9d63-c9b680eb9114" containerName="mariadb-database-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: E1124 13:39:39.414163 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c56ef76f-5741-4430-8973-4c035fc82525" containerName="mariadb-database-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.414169 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="c56ef76f-5741-4430-8973-4c035fc82525" containerName="mariadb-database-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: E1124 13:39:39.414181 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f09296a1-0a30-4fb4-ba9f-c4744066800b" containerName="mariadb-database-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.414186 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="f09296a1-0a30-4fb4-ba9f-c4744066800b" containerName="mariadb-database-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: E1124 13:39:39.414203 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3ad43f7-b71c-4cd6-ad76-93f881fe820d" containerName="mariadb-database-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.414209 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3ad43f7-b71c-4cd6-ad76-93f881fe820d" containerName="mariadb-database-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.414387 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="d373e37d-246d-4b90-863b-b224a059c4e1" containerName="mariadb-account-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.414401 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="739c6211-e4b4-4386-9d63-c9b680eb9114" containerName="mariadb-database-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.414409 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="91ea1a14-92d9-4a15-9cb7-accdb57351b0" containerName="mariadb-account-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.414422 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="f09296a1-0a30-4fb4-ba9f-c4744066800b" containerName="mariadb-database-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.414433 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="c56ef76f-5741-4430-8973-4c035fc82525" containerName="mariadb-database-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.414446 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0016dd31-1097-438e-9197-bd3f5c9659d3" containerName="mariadb-database-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.414457 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="40c8f747-a116-404e-af9e-85f85a759bed" containerName="mariadb-account-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.414469 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3ad43f7-b71c-4cd6-ad76-93f881fe820d" containerName="mariadb-database-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.414482 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="b861c52a-7fd3-4027-931a-624b4149e21b" containerName="mariadb-account-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.414490 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d103981-92d4-4a79-a8e7-cf9f82c8135a" containerName="mariadb-account-create"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.415199 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.418288 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.425975 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"]
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.473112 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.564043 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-x8p79"]
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.564319 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-x8p79" podUID="8bf2c80f-7e58-4be8-b373-6ddc3b0efb97" containerName="dnsmasq-dns" containerID="cri-o://265cfdeb3221f1fbe3ac727c6c9ddc2260a0654a27e4ee87db3f1cfd7854c3dd" gracePeriod=10
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.613822 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/699baf57-b50c-43fd-adc9-7ff6333294df-config-data\") pod \"mysqld-exporter-0\" (UID: \"699baf57-b50c-43fd-adc9-7ff6333294df\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.613921 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/699baf57-b50c-43fd-adc9-7ff6333294df-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"699baf57-b50c-43fd-adc9-7ff6333294df\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.614099 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrcvq\" (UniqueName: \"kubernetes.io/projected/699baf57-b50c-43fd-adc9-7ff6333294df-kube-api-access-wrcvq\") pod \"mysqld-exporter-0\" (UID: \"699baf57-b50c-43fd-adc9-7ff6333294df\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.716910 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrcvq\" (UniqueName: \"kubernetes.io/projected/699baf57-b50c-43fd-adc9-7ff6333294df-kube-api-access-wrcvq\") pod \"mysqld-exporter-0\" (UID: \"699baf57-b50c-43fd-adc9-7ff6333294df\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.717113 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/699baf57-b50c-43fd-adc9-7ff6333294df-config-data\") pod \"mysqld-exporter-0\" (UID: \"699baf57-b50c-43fd-adc9-7ff6333294df\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.717159 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/699baf57-b50c-43fd-adc9-7ff6333294df-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"699baf57-b50c-43fd-adc9-7ff6333294df\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.735265 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/699baf57-b50c-43fd-adc9-7ff6333294df-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"699baf57-b50c-43fd-adc9-7ff6333294df\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.735308 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/699baf57-b50c-43fd-adc9-7ff6333294df-config-data\") pod \"mysqld-exporter-0\" (UID: \"699baf57-b50c-43fd-adc9-7ff6333294df\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.735984 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrcvq\" (UniqueName: \"kubernetes.io/projected/699baf57-b50c-43fd-adc9-7ff6333294df-kube-api-access-wrcvq\") pod \"mysqld-exporter-0\" (UID: \"699baf57-b50c-43fd-adc9-7ff6333294df\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:39:39 crc kubenswrapper[5039]: I1124 13:39:39.753854 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0"
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.406704 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"]
Nov 24 13:39:40 crc kubenswrapper[5039]: W1124 13:39:40.410428 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod699baf57_b50c_43fd_adc9_7ff6333294df.slice/crio-5916b606db685d7a703ece6fbdec26b788140e571d42253420b43a1c8fa864f4 WatchSource:0}: Error finding container 5916b606db685d7a703ece6fbdec26b788140e571d42253420b43a1c8fa864f4: Status 404 returned error can't find the container with id 5916b606db685d7a703ece6fbdec26b788140e571d42253420b43a1c8fa864f4
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.448313 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-x8p79"
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.546042 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-dns-svc\") pod \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\" (UID: \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\") "
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.546113 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-config\") pod \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\" (UID: \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\") "
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.546192 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-ovsdbserver-sb\") pod \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\" (UID: \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\") "
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.546272 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4fml\" (UniqueName: \"kubernetes.io/projected/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-kube-api-access-s4fml\") pod \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\" (UID: \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\") "
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.546442 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-ovsdbserver-nb\") pod \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\" (UID: \"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97\") "
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.554060 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-kube-api-access-s4fml" (OuterVolumeSpecName: "kube-api-access-s4fml") pod "8bf2c80f-7e58-4be8-b373-6ddc3b0efb97" (UID: "8bf2c80f-7e58-4be8-b373-6ddc3b0efb97"). InnerVolumeSpecName "kube-api-access-s4fml". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.593918 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-config" (OuterVolumeSpecName: "config") pod "8bf2c80f-7e58-4be8-b373-6ddc3b0efb97" (UID: "8bf2c80f-7e58-4be8-b373-6ddc3b0efb97"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.597607 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8bf2c80f-7e58-4be8-b373-6ddc3b0efb97" (UID: "8bf2c80f-7e58-4be8-b373-6ddc3b0efb97"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.607285 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8bf2c80f-7e58-4be8-b373-6ddc3b0efb97" (UID: "8bf2c80f-7e58-4be8-b373-6ddc3b0efb97"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.608050 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8bf2c80f-7e58-4be8-b373-6ddc3b0efb97" (UID: "8bf2c80f-7e58-4be8-b373-6ddc3b0efb97"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.648965 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.649001 5039 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.649017 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-config\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.649029 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.649039 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4fml\" (UniqueName: \"kubernetes.io/projected/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97-kube-api-access-s4fml\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.690718 5039 generic.go:334] "Generic (PLEG): container finished" podID="2dbb3128-e4f7-4ff1-bc59-7873deed9a52" containerID="6929ed7d7ea24a1330c1fba516d8289152ccdde5a5b8fb164f38c1790497a4e0" exitCode=0
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.690793 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-b4tnp" event={"ID":"2dbb3128-e4f7-4ff1-bc59-7873deed9a52","Type":"ContainerDied","Data":"6929ed7d7ea24a1330c1fba516d8289152ccdde5a5b8fb164f38c1790497a4e0"}
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.693125 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"699baf57-b50c-43fd-adc9-7ff6333294df","Type":"ContainerStarted","Data":"5916b606db685d7a703ece6fbdec26b788140e571d42253420b43a1c8fa864f4"}
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.696739 5039 generic.go:334] "Generic (PLEG): container finished" podID="8bf2c80f-7e58-4be8-b373-6ddc3b0efb97" containerID="265cfdeb3221f1fbe3ac727c6c9ddc2260a0654a27e4ee87db3f1cfd7854c3dd" exitCode=0
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.696818 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-x8p79" event={"ID":"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97","Type":"ContainerDied","Data":"265cfdeb3221f1fbe3ac727c6c9ddc2260a0654a27e4ee87db3f1cfd7854c3dd"}
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.696849 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-x8p79"
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.696878 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-x8p79" event={"ID":"8bf2c80f-7e58-4be8-b373-6ddc3b0efb97","Type":"ContainerDied","Data":"3492438d70fa7dffd467593b968240b9c55b6252e9750f3eca1b29a489d641b0"}
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.696909 5039 scope.go:117] "RemoveContainer" containerID="265cfdeb3221f1fbe3ac727c6c9ddc2260a0654a27e4ee87db3f1cfd7854c3dd"
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.743023 5039 scope.go:117] "RemoveContainer" containerID="5555d5d66cf9ae3f3e72405ca6998cca456b0e969a29403b6cd98d7412cca701"
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.758522 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-x8p79"]
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.765328 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-x8p79"]
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.815616 5039 scope.go:117] "RemoveContainer" containerID="265cfdeb3221f1fbe3ac727c6c9ddc2260a0654a27e4ee87db3f1cfd7854c3dd"
Nov 24 13:39:40 crc kubenswrapper[5039]: E1124 13:39:40.820696 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"265cfdeb3221f1fbe3ac727c6c9ddc2260a0654a27e4ee87db3f1cfd7854c3dd\": container with ID starting with 265cfdeb3221f1fbe3ac727c6c9ddc2260a0654a27e4ee87db3f1cfd7854c3dd not found: ID does not exist" containerID="265cfdeb3221f1fbe3ac727c6c9ddc2260a0654a27e4ee87db3f1cfd7854c3dd"
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.820773 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"265cfdeb3221f1fbe3ac727c6c9ddc2260a0654a27e4ee87db3f1cfd7854c3dd"} err="failed to get container status \"265cfdeb3221f1fbe3ac727c6c9ddc2260a0654a27e4ee87db3f1cfd7854c3dd\": rpc error: code = NotFound desc = could not find container \"265cfdeb3221f1fbe3ac727c6c9ddc2260a0654a27e4ee87db3f1cfd7854c3dd\": container with ID starting with 265cfdeb3221f1fbe3ac727c6c9ddc2260a0654a27e4ee87db3f1cfd7854c3dd not found: ID does not exist"
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.820802 5039 scope.go:117] "RemoveContainer" containerID="5555d5d66cf9ae3f3e72405ca6998cca456b0e969a29403b6cd98d7412cca701"
Nov 24 13:39:40 crc kubenswrapper[5039]: E1124 13:39:40.821193 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5555d5d66cf9ae3f3e72405ca6998cca456b0e969a29403b6cd98d7412cca701\": container with ID starting with 5555d5d66cf9ae3f3e72405ca6998cca456b0e969a29403b6cd98d7412cca701 not found: ID does not exist" containerID="5555d5d66cf9ae3f3e72405ca6998cca456b0e969a29403b6cd98d7412cca701"
Nov 24 13:39:40 crc kubenswrapper[5039]: I1124 13:39:40.821261 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5555d5d66cf9ae3f3e72405ca6998cca456b0e969a29403b6cd98d7412cca701"} err="failed to get container status \"5555d5d66cf9ae3f3e72405ca6998cca456b0e969a29403b6cd98d7412cca701\": rpc error: code = NotFound desc = could not find container \"5555d5d66cf9ae3f3e72405ca6998cca456b0e969a29403b6cd98d7412cca701\": container with ID starting with 5555d5d66cf9ae3f3e72405ca6998cca456b0e969a29403b6cd98d7412cca701 not found: ID does not exist"
Nov 24 13:39:41 crc kubenswrapper[5039]: I1124 13:39:41.710566 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"54819035-007f-4162-9419-d825f50e1ce9","Type":"ContainerStarted","Data":"53e740450c11dd9ab7fbbcc6a9fb6c9dea422b6418f01a793dedaafefbe5545d"}
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.091287 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-b4tnp"
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.177060 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2mct\" (UniqueName: \"kubernetes.io/projected/2dbb3128-e4f7-4ff1-bc59-7873deed9a52-kube-api-access-t2mct\") pod \"2dbb3128-e4f7-4ff1-bc59-7873deed9a52\" (UID: \"2dbb3128-e4f7-4ff1-bc59-7873deed9a52\") "
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.177328 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2dbb3128-e4f7-4ff1-bc59-7873deed9a52-config-data\") pod \"2dbb3128-e4f7-4ff1-bc59-7873deed9a52\" (UID: \"2dbb3128-e4f7-4ff1-bc59-7873deed9a52\") "
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.177481 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dbb3128-e4f7-4ff1-bc59-7873deed9a52-combined-ca-bundle\") pod \"2dbb3128-e4f7-4ff1-bc59-7873deed9a52\" (UID: \"2dbb3128-e4f7-4ff1-bc59-7873deed9a52\") "
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.184184 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2dbb3128-e4f7-4ff1-bc59-7873deed9a52-kube-api-access-t2mct" (OuterVolumeSpecName: "kube-api-access-t2mct") pod "2dbb3128-e4f7-4ff1-bc59-7873deed9a52" (UID: "2dbb3128-e4f7-4ff1-bc59-7873deed9a52"). InnerVolumeSpecName "kube-api-access-t2mct". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.217809 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2dbb3128-e4f7-4ff1-bc59-7873deed9a52-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2dbb3128-e4f7-4ff1-bc59-7873deed9a52" (UID: "2dbb3128-e4f7-4ff1-bc59-7873deed9a52"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.234265 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2dbb3128-e4f7-4ff1-bc59-7873deed9a52-config-data" (OuterVolumeSpecName: "config-data") pod "2dbb3128-e4f7-4ff1-bc59-7873deed9a52" (UID: "2dbb3128-e4f7-4ff1-bc59-7873deed9a52"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.280268 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2mct\" (UniqueName: \"kubernetes.io/projected/2dbb3128-e4f7-4ff1-bc59-7873deed9a52-kube-api-access-t2mct\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.280328 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2dbb3128-e4f7-4ff1-bc59-7873deed9a52-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.280339 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dbb3128-e4f7-4ff1-bc59-7873deed9a52-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.321580 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8bf2c80f-7e58-4be8-b373-6ddc3b0efb97" path="/var/lib/kubelet/pods/8bf2c80f-7e58-4be8-b373-6ddc3b0efb97/volumes"
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.729754 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"54819035-007f-4162-9419-d825f50e1ce9","Type":"ContainerStarted","Data":"d9f11aa72251a455fbdbd3a3dff93d0fe1b4d2b4f00f4a5043f2ee1f385bad3d"}
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.734698 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-b4tnp" event={"ID":"2dbb3128-e4f7-4ff1-bc59-7873deed9a52","Type":"ContainerDied","Data":"818d6024d44fb9d6e4e75fd4dc93e0c7108139aa0637b19e900772d5f4c97acb"}
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.734780 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="818d6024d44fb9d6e4e75fd4dc93e0c7108139aa0637b19e900772d5f4c97acb"
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.734856 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-b4tnp"
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.777922 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=16.777886118 podStartE2EDuration="16.777886118s" podCreationTimestamp="2025-11-24 13:39:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:39:42.765343892 +0000 UTC m=+1295.204468422" watchObservedRunningTime="2025-11-24 13:39:42.777886118 +0000 UTC m=+1295.217010668"
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.967830 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-kw8fc"]
Nov 24 13:39:42 crc kubenswrapper[5039]: E1124 13:39:42.968213 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bf2c80f-7e58-4be8-b373-6ddc3b0efb97" containerName="dnsmasq-dns"
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.968234 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bf2c80f-7e58-4be8-b373-6ddc3b0efb97" containerName="dnsmasq-dns"
Nov 24 13:39:42 crc kubenswrapper[5039]: E1124 13:39:42.968247 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bf2c80f-7e58-4be8-b373-6ddc3b0efb97" containerName="init"
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.968253 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bf2c80f-7e58-4be8-b373-6ddc3b0efb97" containerName="init"
Nov 24 13:39:42 crc kubenswrapper[5039]: E1124 13:39:42.968266 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dbb3128-e4f7-4ff1-bc59-7873deed9a52" containerName="keystone-db-sync"
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.968273 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dbb3128-e4f7-4ff1-bc59-7873deed9a52" containerName="keystone-db-sync"
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.968479 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bf2c80f-7e58-4be8-b373-6ddc3b0efb97" containerName="dnsmasq-dns"
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.968513 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="2dbb3128-e4f7-4ff1-bc59-7873deed9a52" containerName="keystone-db-sync"
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.969574 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55fff446b9-kw8fc"
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.981033 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-kw8fc"]
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.996287 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-ovsdbserver-nb\") pod \"dnsmasq-dns-55fff446b9-kw8fc\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " pod="openstack/dnsmasq-dns-55fff446b9-kw8fc"
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.996545 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4k8f8\" (UniqueName: \"kubernetes.io/projected/24c51914-29d4-40bb-b6a5-e14aac592ce3-kube-api-access-4k8f8\") pod \"dnsmasq-dns-55fff446b9-kw8fc\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " pod="openstack/dnsmasq-dns-55fff446b9-kw8fc"
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.996638 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-config\") pod \"dnsmasq-dns-55fff446b9-kw8fc\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " pod="openstack/dnsmasq-dns-55fff446b9-kw8fc"
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.996841 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-ovsdbserver-sb\") pod \"dnsmasq-dns-55fff446b9-kw8fc\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " pod="openstack/dnsmasq-dns-55fff446b9-kw8fc"
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.996942 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-dns-swift-storage-0\") pod \"dnsmasq-dns-55fff446b9-kw8fc\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " pod="openstack/dnsmasq-dns-55fff446b9-kw8fc"
Nov 24 13:39:42 crc kubenswrapper[5039]: I1124 13:39:42.997031 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-dns-svc\") pod \"dnsmasq-dns-55fff446b9-kw8fc\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " pod="openstack/dnsmasq-dns-55fff446b9-kw8fc"
Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.024349 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-n7h6g"]
Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.025638 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-n7h6g"
Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.029567 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.029809 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.030004 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-jsx5z"
Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.030149 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.030877 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.078069 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-n7h6g"]
Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.097708 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-combined-ca-bundle\") pod \"keystone-bootstrap-n7h6g\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " pod="openstack/keystone-bootstrap-n7h6g"
Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.097778 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-ovsdbserver-sb\") pod \"dnsmasq-dns-55fff446b9-kw8fc\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " pod="openstack/dnsmasq-dns-55fff446b9-kw8fc"
Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.097845 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-dns-swift-storage-0\") pod \"dnsmasq-dns-55fff446b9-kw8fc\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " pod="openstack/dnsmasq-dns-55fff446b9-kw8fc"
Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.097878 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-dns-svc\") pod \"dnsmasq-dns-55fff446b9-kw8fc\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " pod="openstack/dnsmasq-dns-55fff446b9-kw8fc"
Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.097933 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-ovsdbserver-nb\") pod \"dnsmasq-dns-55fff446b9-kw8fc\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " pod="openstack/dnsmasq-dns-55fff446b9-kw8fc"
Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.097959 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4k8f8\" (UniqueName: \"kubernetes.io/projected/24c51914-29d4-40bb-b6a5-e14aac592ce3-kube-api-access-4k8f8\") pod \"dnsmasq-dns-55fff446b9-kw8fc\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " pod="openstack/dnsmasq-dns-55fff446b9-kw8fc"
Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.097984 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\"
(UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-config\") pod \"dnsmasq-dns-55fff446b9-kw8fc\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " pod="openstack/dnsmasq-dns-55fff446b9-kw8fc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.098015 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-config-data\") pod \"keystone-bootstrap-n7h6g\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " pod="openstack/keystone-bootstrap-n7h6g" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.098048 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-scripts\") pod \"keystone-bootstrap-n7h6g\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " pod="openstack/keystone-bootstrap-n7h6g" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.098075 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rknfk\" (UniqueName: \"kubernetes.io/projected/92c678de-53b6-450f-a106-3ec37705ea3b-kube-api-access-rknfk\") pod \"keystone-bootstrap-n7h6g\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " pod="openstack/keystone-bootstrap-n7h6g" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.098101 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-fernet-keys\") pod \"keystone-bootstrap-n7h6g\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " pod="openstack/keystone-bootstrap-n7h6g" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.098182 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-credential-keys\") pod \"keystone-bootstrap-n7h6g\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " pod="openstack/keystone-bootstrap-n7h6g" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.099139 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-ovsdbserver-sb\") pod \"dnsmasq-dns-55fff446b9-kw8fc\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " pod="openstack/dnsmasq-dns-55fff446b9-kw8fc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.099783 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-dns-swift-storage-0\") pod \"dnsmasq-dns-55fff446b9-kw8fc\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " pod="openstack/dnsmasq-dns-55fff446b9-kw8fc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.099852 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-dns-svc\") pod \"dnsmasq-dns-55fff446b9-kw8fc\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " pod="openstack/dnsmasq-dns-55fff446b9-kw8fc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.100050 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-ovsdbserver-nb\") pod \"dnsmasq-dns-55fff446b9-kw8fc\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " pod="openstack/dnsmasq-dns-55fff446b9-kw8fc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.100336 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-config\") pod \"dnsmasq-dns-55fff446b9-kw8fc\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " pod="openstack/dnsmasq-dns-55fff446b9-kw8fc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.143664 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4k8f8\" (UniqueName: \"kubernetes.io/projected/24c51914-29d4-40bb-b6a5-e14aac592ce3-kube-api-access-4k8f8\") pod \"dnsmasq-dns-55fff446b9-kw8fc\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " pod="openstack/dnsmasq-dns-55fff446b9-kw8fc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.177432 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-qvrwf"] Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.179145 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-qvrwf" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.184472 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-wclsb" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.184704 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.208767 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-config-data\") pod \"keystone-bootstrap-n7h6g\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " pod="openstack/keystone-bootstrap-n7h6g" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.209034 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-scripts\") pod \"keystone-bootstrap-n7h6g\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " pod="openstack/keystone-bootstrap-n7h6g" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.209112 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rknfk\" (UniqueName: \"kubernetes.io/projected/92c678de-53b6-450f-a106-3ec37705ea3b-kube-api-access-rknfk\") pod \"keystone-bootstrap-n7h6g\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " pod="openstack/keystone-bootstrap-n7h6g" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.209182 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-fernet-keys\") pod \"keystone-bootstrap-n7h6g\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " pod="openstack/keystone-bootstrap-n7h6g" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.209336 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-credential-keys\") pod \"keystone-bootstrap-n7h6g\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " pod="openstack/keystone-bootstrap-n7h6g" Nov 24 13:39:43 crc kubenswrapper[5039]: 
I1124 13:39:43.209441 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-combined-ca-bundle\") pod \"keystone-bootstrap-n7h6g\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " pod="openstack/keystone-bootstrap-n7h6g" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.211915 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-qvrwf"] Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.217778 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-combined-ca-bundle\") pod \"keystone-bootstrap-n7h6g\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " pod="openstack/keystone-bootstrap-n7h6g" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.218849 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-config-data\") pod \"keystone-bootstrap-n7h6g\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " pod="openstack/keystone-bootstrap-n7h6g" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.230471 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-wgc6v"] Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.233034 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-wgc6v" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.239135 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-scripts\") pod \"keystone-bootstrap-n7h6g\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " pod="openstack/keystone-bootstrap-n7h6g" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.243648 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-fernet-keys\") pod \"keystone-bootstrap-n7h6g\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " pod="openstack/keystone-bootstrap-n7h6g" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.244013 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.244263 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-h6hqs" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.244492 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.246279 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rknfk\" (UniqueName: \"kubernetes.io/projected/92c678de-53b6-450f-a106-3ec37705ea3b-kube-api-access-rknfk\") pod \"keystone-bootstrap-n7h6g\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " pod="openstack/keystone-bootstrap-n7h6g" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.246902 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-credential-keys\") pod \"keystone-bootstrap-n7h6g\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " pod="openstack/keystone-bootstrap-n7h6g" Nov 
24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.286769 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-47dzd"] Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.291396 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-47dzd" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.294289 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.300690 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.319898 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55fff446b9-kw8fc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.349043 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-47dzd"] Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.357921 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-cnvk2" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.366154 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c0313ce-4944-4fad-bce0-47d60b273f69-config-data\") pod \"heat-db-sync-qvrwf\" (UID: \"7c0313ce-4944-4fad-bce0-47d60b273f69\") " pod="openstack/heat-db-sync-qvrwf" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.366402 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c0313ce-4944-4fad-bce0-47d60b273f69-combined-ca-bundle\") pod \"heat-db-sync-qvrwf\" (UID: \"7c0313ce-4944-4fad-bce0-47d60b273f69\") " pod="openstack/heat-db-sync-qvrwf" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.366482 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5smd\" (UniqueName: \"kubernetes.io/projected/7c0313ce-4944-4fad-bce0-47d60b273f69-kube-api-access-w5smd\") pod \"heat-db-sync-qvrwf\" (UID: \"7c0313ce-4944-4fad-bce0-47d60b273f69\") " pod="openstack/heat-db-sync-qvrwf" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.367064 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-n7h6g" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.392300 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-wgc6v"] Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.465784 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-x49zw"] Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.467798 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-x49zw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.476297 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-grn48" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.476662 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.476695 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3938495-f119-4641-b76b-0333c1391b24-combined-ca-bundle\") pod \"neutron-db-sync-wgc6v\" (UID: \"a3938495-f119-4641-b76b-0333c1391b24\") " pod="openstack/neutron-db-sync-wgc6v" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.476948 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-db-sync-config-data\") pod \"cinder-db-sync-47dzd\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " pod="openstack/cinder-db-sync-47dzd" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.476983 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c0313ce-4944-4fad-bce0-47d60b273f69-config-data\") pod \"heat-db-sync-qvrwf\" (UID: \"7c0313ce-4944-4fad-bce0-47d60b273f69\") " pod="openstack/heat-db-sync-qvrwf" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.482688 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxkfs\" (UniqueName: \"kubernetes.io/projected/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-kube-api-access-gxkfs\") pod \"cinder-db-sync-47dzd\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " pod="openstack/cinder-db-sync-47dzd" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.482844 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c0313ce-4944-4fad-bce0-47d60b273f69-combined-ca-bundle\") pod \"heat-db-sync-qvrwf\" (UID: \"7c0313ce-4944-4fad-bce0-47d60b273f69\") " pod="openstack/heat-db-sync-qvrwf" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.482895 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-etc-machine-id\") pod \"cinder-db-sync-47dzd\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " pod="openstack/cinder-db-sync-47dzd" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.482967 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5smd\" (UniqueName: \"kubernetes.io/projected/7c0313ce-4944-4fad-bce0-47d60b273f69-kube-api-access-w5smd\") pod \"heat-db-sync-qvrwf\" (UID: \"7c0313ce-4944-4fad-bce0-47d60b273f69\") " pod="openstack/heat-db-sync-qvrwf" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.483006 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-scripts\") pod \"cinder-db-sync-47dzd\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " pod="openstack/cinder-db-sync-47dzd" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 
13:39:43.483093 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a3938495-f119-4641-b76b-0333c1391b24-config\") pod \"neutron-db-sync-wgc6v\" (UID: \"a3938495-f119-4641-b76b-0333c1391b24\") " pod="openstack/neutron-db-sync-wgc6v" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.483148 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-combined-ca-bundle\") pod \"cinder-db-sync-47dzd\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " pod="openstack/cinder-db-sync-47dzd" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.483162 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5d2jk\" (UniqueName: \"kubernetes.io/projected/a3938495-f119-4641-b76b-0333c1391b24-kube-api-access-5d2jk\") pod \"neutron-db-sync-wgc6v\" (UID: \"a3938495-f119-4641-b76b-0333c1391b24\") " pod="openstack/neutron-db-sync-wgc6v" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.483197 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-config-data\") pod \"cinder-db-sync-47dzd\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " pod="openstack/cinder-db-sync-47dzd" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.490054 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-dvmqw"] Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.492136 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c0313ce-4944-4fad-bce0-47d60b273f69-config-data\") pod \"heat-db-sync-qvrwf\" (UID: \"7c0313ce-4944-4fad-bce0-47d60b273f69\") " pod="openstack/heat-db-sync-qvrwf" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.500206 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c0313ce-4944-4fad-bce0-47d60b273f69-combined-ca-bundle\") pod \"heat-db-sync-qvrwf\" (UID: \"7c0313ce-4944-4fad-bce0-47d60b273f69\") " pod="openstack/heat-db-sync-qvrwf" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.517765 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-x49zw"] Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.517876 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-dvmqw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.523286 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5smd\" (UniqueName: \"kubernetes.io/projected/7c0313ce-4944-4fad-bce0-47d60b273f69-kube-api-access-w5smd\") pod \"heat-db-sync-qvrwf\" (UID: \"7c0313ce-4944-4fad-bce0-47d60b273f69\") " pod="openstack/heat-db-sync-qvrwf" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.527477 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.527676 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-78ffd" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.527782 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.541181 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-dvmqw"] Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.555248 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-kw8fc"] Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.566591 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-sw4mc"] Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.568564 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.579203 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-sw4mc"] Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.584549 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-scripts\") pod \"cinder-db-sync-47dzd\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " pod="openstack/cinder-db-sync-47dzd" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.584611 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4ab794c0-2264-4041-b697-ef7829a5129a-db-sync-config-data\") pod \"barbican-db-sync-x49zw\" (UID: \"4ab794c0-2264-4041-b697-ef7829a5129a\") " pod="openstack/barbican-db-sync-x49zw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.584635 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a3938495-f119-4641-b76b-0333c1391b24-config\") pod \"neutron-db-sync-wgc6v\" (UID: \"a3938495-f119-4641-b76b-0333c1391b24\") " pod="openstack/neutron-db-sync-wgc6v" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.584668 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-combined-ca-bundle\") pod \"cinder-db-sync-47dzd\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " pod="openstack/cinder-db-sync-47dzd" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.584686 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5d2jk\" (UniqueName: \"kubernetes.io/projected/a3938495-f119-4641-b76b-0333c1391b24-kube-api-access-5d2jk\") pod 
\"neutron-db-sync-wgc6v\" (UID: \"a3938495-f119-4641-b76b-0333c1391b24\") " pod="openstack/neutron-db-sync-wgc6v" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.584705 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-config-data\") pod \"cinder-db-sync-47dzd\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " pod="openstack/cinder-db-sync-47dzd" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.584725 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3938495-f119-4641-b76b-0333c1391b24-combined-ca-bundle\") pod \"neutron-db-sync-wgc6v\" (UID: \"a3938495-f119-4641-b76b-0333c1391b24\") " pod="openstack/neutron-db-sync-wgc6v" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.584752 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-db-sync-config-data\") pod \"cinder-db-sync-47dzd\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " pod="openstack/cinder-db-sync-47dzd" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.584802 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ab794c0-2264-4041-b697-ef7829a5129a-combined-ca-bundle\") pod \"barbican-db-sync-x49zw\" (UID: \"4ab794c0-2264-4041-b697-ef7829a5129a\") " pod="openstack/barbican-db-sync-x49zw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.584819 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxkfs\" (UniqueName: \"kubernetes.io/projected/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-kube-api-access-gxkfs\") pod \"cinder-db-sync-47dzd\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " pod="openstack/cinder-db-sync-47dzd" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.584856 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhlfq\" (UniqueName: \"kubernetes.io/projected/4ab794c0-2264-4041-b697-ef7829a5129a-kube-api-access-dhlfq\") pod \"barbican-db-sync-x49zw\" (UID: \"4ab794c0-2264-4041-b697-ef7829a5129a\") " pod="openstack/barbican-db-sync-x49zw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.584893 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-etc-machine-id\") pod \"cinder-db-sync-47dzd\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " pod="openstack/cinder-db-sync-47dzd" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.584974 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-etc-machine-id\") pod \"cinder-db-sync-47dzd\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " pod="openstack/cinder-db-sync-47dzd" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.591728 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-config-data\") pod \"cinder-db-sync-47dzd\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " pod="openstack/cinder-db-sync-47dzd" Nov 24 13:39:43 crc 
kubenswrapper[5039]: I1124 13:39:43.593837 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/a3938495-f119-4641-b76b-0333c1391b24-config\") pod \"neutron-db-sync-wgc6v\" (UID: \"a3938495-f119-4641-b76b-0333c1391b24\") " pod="openstack/neutron-db-sync-wgc6v" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.595021 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-scripts\") pod \"cinder-db-sync-47dzd\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " pod="openstack/cinder-db-sync-47dzd" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.601526 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-db-sync-config-data\") pod \"cinder-db-sync-47dzd\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " pod="openstack/cinder-db-sync-47dzd" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.604026 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3938495-f119-4641-b76b-0333c1391b24-combined-ca-bundle\") pod \"neutron-db-sync-wgc6v\" (UID: \"a3938495-f119-4641-b76b-0333c1391b24\") " pod="openstack/neutron-db-sync-wgc6v" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.616710 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5d2jk\" (UniqueName: \"kubernetes.io/projected/a3938495-f119-4641-b76b-0333c1391b24-kube-api-access-5d2jk\") pod \"neutron-db-sync-wgc6v\" (UID: \"a3938495-f119-4641-b76b-0333c1391b24\") " pod="openstack/neutron-db-sync-wgc6v" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.618154 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-combined-ca-bundle\") pod \"cinder-db-sync-47dzd\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " pod="openstack/cinder-db-sync-47dzd" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.622098 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxkfs\" (UniqueName: \"kubernetes.io/projected/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-kube-api-access-gxkfs\") pod \"cinder-db-sync-47dzd\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " pod="openstack/cinder-db-sync-47dzd" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.637141 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.639435 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.644860 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.646556 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.646640 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.658991 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-qvrwf" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.686253 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1577e002-5267-48f4-b292-158ebed8410c-config-data\") pod \"placement-db-sync-dvmqw\" (UID: \"1577e002-5267-48f4-b292-158ebed8410c\") " pod="openstack/placement-db-sync-dvmqw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.686323 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ab794c0-2264-4041-b697-ef7829a5129a-combined-ca-bundle\") pod \"barbican-db-sync-x49zw\" (UID: \"4ab794c0-2264-4041-b697-ef7829a5129a\") " pod="openstack/barbican-db-sync-x49zw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.686350 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxhvh\" (UniqueName: \"kubernetes.io/projected/1577e002-5267-48f4-b292-158ebed8410c-kube-api-access-kxhvh\") pod \"placement-db-sync-dvmqw\" (UID: \"1577e002-5267-48f4-b292-158ebed8410c\") " pod="openstack/placement-db-sync-dvmqw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.686393 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhlfq\" (UniqueName: \"kubernetes.io/projected/4ab794c0-2264-4041-b697-ef7829a5129a-kube-api-access-dhlfq\") pod \"barbican-db-sync-x49zw\" (UID: \"4ab794c0-2264-4041-b697-ef7829a5129a\") " pod="openstack/barbican-db-sync-x49zw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.686412 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-ovsdbserver-nb\") pod \"dnsmasq-dns-76fcf4b695-sw4mc\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.686462 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wwgg\" (UniqueName: \"kubernetes.io/projected/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-kube-api-access-2wwgg\") pod \"dnsmasq-dns-76fcf4b695-sw4mc\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.686482 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-dns-svc\") pod \"dnsmasq-dns-76fcf4b695-sw4mc\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.686702 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-dns-swift-storage-0\") pod \"dnsmasq-dns-76fcf4b695-sw4mc\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.686734 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-ovsdbserver-sb\") 
pod \"dnsmasq-dns-76fcf4b695-sw4mc\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.686753 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4ab794c0-2264-4041-b697-ef7829a5129a-db-sync-config-data\") pod \"barbican-db-sync-x49zw\" (UID: \"4ab794c0-2264-4041-b697-ef7829a5129a\") " pod="openstack/barbican-db-sync-x49zw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.686784 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1577e002-5267-48f4-b292-158ebed8410c-combined-ca-bundle\") pod \"placement-db-sync-dvmqw\" (UID: \"1577e002-5267-48f4-b292-158ebed8410c\") " pod="openstack/placement-db-sync-dvmqw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.686826 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1577e002-5267-48f4-b292-158ebed8410c-scripts\") pod \"placement-db-sync-dvmqw\" (UID: \"1577e002-5267-48f4-b292-158ebed8410c\") " pod="openstack/placement-db-sync-dvmqw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.686879 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1577e002-5267-48f4-b292-158ebed8410c-logs\") pod \"placement-db-sync-dvmqw\" (UID: \"1577e002-5267-48f4-b292-158ebed8410c\") " pod="openstack/placement-db-sync-dvmqw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.686912 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-config\") pod \"dnsmasq-dns-76fcf4b695-sw4mc\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.696013 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ab794c0-2264-4041-b697-ef7829a5129a-combined-ca-bundle\") pod \"barbican-db-sync-x49zw\" (UID: \"4ab794c0-2264-4041-b697-ef7829a5129a\") " pod="openstack/barbican-db-sync-x49zw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.705262 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-wgc6v" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.706088 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4ab794c0-2264-4041-b697-ef7829a5129a-db-sync-config-data\") pod \"barbican-db-sync-x49zw\" (UID: \"4ab794c0-2264-4041-b697-ef7829a5129a\") " pod="openstack/barbican-db-sync-x49zw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.716032 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhlfq\" (UniqueName: \"kubernetes.io/projected/4ab794c0-2264-4041-b697-ef7829a5129a-kube-api-access-dhlfq\") pod \"barbican-db-sync-x49zw\" (UID: \"4ab794c0-2264-4041-b697-ef7829a5129a\") " pod="openstack/barbican-db-sync-x49zw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.729539 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-47dzd" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.789350 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vc7l\" (UniqueName: \"kubernetes.io/projected/158b7bf7-1207-4509-bb3f-d666847eb59d-kube-api-access-8vc7l\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.789451 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wwgg\" (UniqueName: \"kubernetes.io/projected/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-kube-api-access-2wwgg\") pod \"dnsmasq-dns-76fcf4b695-sw4mc\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.789488 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-dns-svc\") pod \"dnsmasq-dns-76fcf4b695-sw4mc\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.789543 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-dns-swift-storage-0\") pod \"dnsmasq-dns-76fcf4b695-sw4mc\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.789576 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-config-data\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.789612 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-ovsdbserver-sb\") pod \"dnsmasq-dns-76fcf4b695-sw4mc\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.789644 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-scripts\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.789681 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1577e002-5267-48f4-b292-158ebed8410c-combined-ca-bundle\") pod \"placement-db-sync-dvmqw\" (UID: \"1577e002-5267-48f4-b292-158ebed8410c\") " pod="openstack/placement-db-sync-dvmqw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.789733 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1577e002-5267-48f4-b292-158ebed8410c-scripts\") pod \"placement-db-sync-dvmqw\" (UID: \"1577e002-5267-48f4-b292-158ebed8410c\") " pod="openstack/placement-db-sync-dvmqw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.789760 
5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.789811 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/158b7bf7-1207-4509-bb3f-d666847eb59d-run-httpd\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.789837 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1577e002-5267-48f4-b292-158ebed8410c-logs\") pod \"placement-db-sync-dvmqw\" (UID: \"1577e002-5267-48f4-b292-158ebed8410c\") " pod="openstack/placement-db-sync-dvmqw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.789865 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.789886 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-config\") pod \"dnsmasq-dns-76fcf4b695-sw4mc\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.789922 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/158b7bf7-1207-4509-bb3f-d666847eb59d-log-httpd\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.789949 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1577e002-5267-48f4-b292-158ebed8410c-config-data\") pod \"placement-db-sync-dvmqw\" (UID: \"1577e002-5267-48f4-b292-158ebed8410c\") " pod="openstack/placement-db-sync-dvmqw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.789997 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxhvh\" (UniqueName: \"kubernetes.io/projected/1577e002-5267-48f4-b292-158ebed8410c-kube-api-access-kxhvh\") pod \"placement-db-sync-dvmqw\" (UID: \"1577e002-5267-48f4-b292-158ebed8410c\") " pod="openstack/placement-db-sync-dvmqw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.790050 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-ovsdbserver-nb\") pod \"dnsmasq-dns-76fcf4b695-sw4mc\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.791220 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-ovsdbserver-nb\") pod \"dnsmasq-dns-76fcf4b695-sw4mc\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.792310 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-dns-svc\") pod \"dnsmasq-dns-76fcf4b695-sw4mc\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.793002 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-dns-swift-storage-0\") pod \"dnsmasq-dns-76fcf4b695-sw4mc\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.794255 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1577e002-5267-48f4-b292-158ebed8410c-logs\") pod \"placement-db-sync-dvmqw\" (UID: \"1577e002-5267-48f4-b292-158ebed8410c\") " pod="openstack/placement-db-sync-dvmqw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.794655 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-config\") pod \"dnsmasq-dns-76fcf4b695-sw4mc\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.798470 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1577e002-5267-48f4-b292-158ebed8410c-combined-ca-bundle\") pod \"placement-db-sync-dvmqw\" (UID: \"1577e002-5267-48f4-b292-158ebed8410c\") " pod="openstack/placement-db-sync-dvmqw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.799673 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1577e002-5267-48f4-b292-158ebed8410c-config-data\") pod \"placement-db-sync-dvmqw\" (UID: \"1577e002-5267-48f4-b292-158ebed8410c\") " pod="openstack/placement-db-sync-dvmqw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.800396 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-ovsdbserver-sb\") pod \"dnsmasq-dns-76fcf4b695-sw4mc\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.809900 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1577e002-5267-48f4-b292-158ebed8410c-scripts\") pod \"placement-db-sync-dvmqw\" (UID: \"1577e002-5267-48f4-b292-158ebed8410c\") " pod="openstack/placement-db-sync-dvmqw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.818045 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxhvh\" (UniqueName: \"kubernetes.io/projected/1577e002-5267-48f4-b292-158ebed8410c-kube-api-access-kxhvh\") pod \"placement-db-sync-dvmqw\" (UID: \"1577e002-5267-48f4-b292-158ebed8410c\") " pod="openstack/placement-db-sync-dvmqw" Nov 24 
13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.820397 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wwgg\" (UniqueName: \"kubernetes.io/projected/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-kube-api-access-2wwgg\") pod \"dnsmasq-dns-76fcf4b695-sw4mc\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.891723 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/158b7bf7-1207-4509-bb3f-d666847eb59d-run-httpd\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.891785 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.891856 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/158b7bf7-1207-4509-bb3f-d666847eb59d-log-httpd\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.892777 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/158b7bf7-1207-4509-bb3f-d666847eb59d-log-httpd\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.892824 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vc7l\" (UniqueName: \"kubernetes.io/projected/158b7bf7-1207-4509-bb3f-d666847eb59d-kube-api-access-8vc7l\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.893065 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-config-data\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.893171 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-scripts\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.893317 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.893683 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/158b7bf7-1207-4509-bb3f-d666847eb59d-run-httpd\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 
13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.898575 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-scripts\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.899027 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.900112 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.900652 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-config-data\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.908976 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-x49zw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.926895 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vc7l\" (UniqueName: \"kubernetes.io/projected/158b7bf7-1207-4509-bb3f-d666847eb59d-kube-api-access-8vc7l\") pod \"ceilometer-0\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " pod="openstack/ceilometer-0" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.937135 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-dvmqw" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.952840 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:43 crc kubenswrapper[5039]: I1124 13:39:43.993709 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:39:46 crc kubenswrapper[5039]: I1124 13:39:46.174702 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:39:46 crc kubenswrapper[5039]: I1124 13:39:46.622562 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:50 crc kubenswrapper[5039]: I1124 13:39:50.101597 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:39:50 crc kubenswrapper[5039]: I1124 13:39:50.102348 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:39:50 crc kubenswrapper[5039]: I1124 13:39:50.102405 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:39:50 crc kubenswrapper[5039]: I1124 13:39:50.103161 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f993c951919012dcf982065d331337a1627947abef22ad885fe48114cf5620d5"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 13:39:50 crc kubenswrapper[5039]: I1124 13:39:50.103218 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://f993c951919012dcf982065d331337a1627947abef22ad885fe48114cf5620d5" gracePeriod=600 Nov 24 13:39:50 crc kubenswrapper[5039]: I1124 13:39:50.833325 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="f993c951919012dcf982065d331337a1627947abef22ad885fe48114cf5620d5" exitCode=0 Nov 24 13:39:50 crc kubenswrapper[5039]: I1124 13:39:50.833373 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"f993c951919012dcf982065d331337a1627947abef22ad885fe48114cf5620d5"} Nov 24 13:39:50 crc kubenswrapper[5039]: I1124 13:39:50.833412 5039 scope.go:117] "RemoveContainer" containerID="16f5b0fb44ff36ed732d98fa0d4391bb1a697e230891b1a79ab6e7366f72ba49" Nov 24 13:39:51 crc kubenswrapper[5039]: E1124 13:39:51.107622 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Nov 24 13:39:51 crc kubenswrapper[5039]: E1124 13:39:51.108014 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f27nh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-vvjsm_openstack(2c22dae7-e545-4eb0-9552-f3c691f397df): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 13:39:51 crc kubenswrapper[5039]: E1124 13:39:51.109676 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-vvjsm" podUID="2c22dae7-e545-4eb0-9552-f3c691f397df" Nov 24 13:39:51 crc kubenswrapper[5039]: I1124 13:39:51.613556 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-qvrwf"] Nov 24 13:39:51 crc kubenswrapper[5039]: E1124 13:39:51.862606 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-vvjsm" podUID="2c22dae7-e545-4eb0-9552-f3c691f397df" Nov 24 13:39:51 crc kubenswrapper[5039]: I1124 13:39:51.873643 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-n7h6g"] Nov 24 13:39:51 crc kubenswrapper[5039]: I1124 13:39:51.951849 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-wgc6v"] Nov 24 13:39:51 crc kubenswrapper[5039]: I1124 13:39:51.958159 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-dvmqw"] Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.106950 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/dnsmasq-dns-55fff446b9-kw8fc"] Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.122433 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-47dzd"] Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.129908 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-x49zw"] Nov 24 13:39:52 crc kubenswrapper[5039]: W1124 13:39:52.173539 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod24c51914_29d4_40bb_b6a5_e14aac592ce3.slice/crio-e4594a97e38908aaad1ad6c1e61e391ef55571c4d8ee13e294d4458bd5617f4c WatchSource:0}: Error finding container e4594a97e38908aaad1ad6c1e61e391ef55571c4d8ee13e294d4458bd5617f4c: Status 404 returned error can't find the container with id e4594a97e38908aaad1ad6c1e61e391ef55571c4d8ee13e294d4458bd5617f4c Nov 24 13:39:52 crc kubenswrapper[5039]: W1124 13:39:52.186954 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ab794c0_2264_4041_b697_ef7829a5129a.slice/crio-45bd170283b7e9cd3031e9311c85268801a1f71ad9040cb0f989e7731a96a449 WatchSource:0}: Error finding container 45bd170283b7e9cd3031e9311c85268801a1f71ad9040cb0f989e7731a96a449: Status 404 returned error can't find the container with id 45bd170283b7e9cd3031e9311c85268801a1f71ad9040cb0f989e7731a96a449 Nov 24 13:39:52 crc kubenswrapper[5039]: W1124 13:39:52.192116 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda3938495_f119_4641_b76b_0333c1391b24.slice/crio-416a9903ba00df7ea466b990e781cc84025b0645a48fbe7e8ec2fc807cd3a8e4 WatchSource:0}: Error finding container 416a9903ba00df7ea466b990e781cc84025b0645a48fbe7e8ec2fc807cd3a8e4: Status 404 returned error can't find the container with id 416a9903ba00df7ea466b990e781cc84025b0645a48fbe7e8ec2fc807cd3a8e4 Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.352467 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.363445 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-sw4mc"] Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.863391 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-qvrwf" event={"ID":"7c0313ce-4944-4fad-bce0-47d60b273f69","Type":"ContainerStarted","Data":"bd9ae7ac87801b66dd098f612d39ad48db979ad18bdcc8c9292952cb351eaccb"} Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.865637 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-47dzd" event={"ID":"cb2d453a-99e6-4593-ad2d-a57c7a2c2519","Type":"ContainerStarted","Data":"76be77551badf9f5cad8f5dac08e7c460a90f17aed782b7a85250574c2317d04"} Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.868926 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-x49zw" event={"ID":"4ab794c0-2264-4041-b697-ef7829a5129a","Type":"ContainerStarted","Data":"45bd170283b7e9cd3031e9311c85268801a1f71ad9040cb0f989e7731a96a449"} Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.874537 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"158b7bf7-1207-4509-bb3f-d666847eb59d","Type":"ContainerStarted","Data":"92ad996d731c70cc5d910d40f7117e4e8ffc1551bf2171368d38eba44f984c16"} Nov 24 13:39:52 crc 
kubenswrapper[5039]: I1124 13:39:52.876925 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-wgc6v" event={"ID":"a3938495-f119-4641-b76b-0333c1391b24","Type":"ContainerStarted","Data":"36249bd63021c1e97ceee74e50e1de631a98586dfedf719d9a4b4afae3b296a9"} Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.876949 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-wgc6v" event={"ID":"a3938495-f119-4641-b76b-0333c1391b24","Type":"ContainerStarted","Data":"416a9903ba00df7ea466b990e781cc84025b0645a48fbe7e8ec2fc807cd3a8e4"} Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.879920 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133"} Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.887124 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-dvmqw" event={"ID":"1577e002-5267-48f4-b292-158ebed8410c","Type":"ContainerStarted","Data":"0d13c32a93f999586434018e372d3fcd393db197362227457049e302ab95b74a"} Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.888938 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-n7h6g" event={"ID":"92c678de-53b6-450f-a106-3ec37705ea3b","Type":"ContainerStarted","Data":"b9b4a336c29ad683550561fdb764ff313aac831a347ffd5f562bebbebe1c2365"} Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.888960 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-n7h6g" event={"ID":"92c678de-53b6-450f-a106-3ec37705ea3b","Type":"ContainerStarted","Data":"14d94ef9b2f09d3bd480f5b846ee6fd48a7dd6c8d1e0fc5aac2452b651eb3796"} Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.902685 5039 generic.go:334] "Generic (PLEG): container finished" podID="9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21" containerID="da9544984bc7bed87b2addb4937d3b07e5a7e66bad28910ae0e06cc210d24320" exitCode=0 Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.902796 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" event={"ID":"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21","Type":"ContainerDied","Data":"da9544984bc7bed87b2addb4937d3b07e5a7e66bad28910ae0e06cc210d24320"} Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.902824 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" event={"ID":"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21","Type":"ContainerStarted","Data":"65570993434dab4b93a2c64ec323510d083e3dd499c03eb26db56180755c7c64"} Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.910634 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"699baf57-b50c-43fd-adc9-7ff6333294df","Type":"ContainerStarted","Data":"22711374ef6575b81dd01aa125a85a2b84b89d45f0761b9a5b7ebedcfbac5fd3"} Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.914335 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-wgc6v" podStartSLOduration=9.914283225 podStartE2EDuration="9.914283225s" podCreationTimestamp="2025-11-24 13:39:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:39:52.896478441 +0000 UTC m=+1305.335602951" 
watchObservedRunningTime="2025-11-24 13:39:52.914283225 +0000 UTC m=+1305.353407725" Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.925738 5039 generic.go:334] "Generic (PLEG): container finished" podID="24c51914-29d4-40bb-b6a5-e14aac592ce3" containerID="d992bbc96521a9e5c9b7b7f7f23a8487a5556929fe20803c7add0c2337c11ee1" exitCode=0 Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.925804 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55fff446b9-kw8fc" event={"ID":"24c51914-29d4-40bb-b6a5-e14aac592ce3","Type":"ContainerDied","Data":"d992bbc96521a9e5c9b7b7f7f23a8487a5556929fe20803c7add0c2337c11ee1"} Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.925835 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55fff446b9-kw8fc" event={"ID":"24c51914-29d4-40bb-b6a5-e14aac592ce3","Type":"ContainerStarted","Data":"e4594a97e38908aaad1ad6c1e61e391ef55571c4d8ee13e294d4458bd5617f4c"} Nov 24 13:39:52 crc kubenswrapper[5039]: I1124 13:39:52.938161 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-n7h6g" podStartSLOduration=10.938135069 podStartE2EDuration="10.938135069s" podCreationTimestamp="2025-11-24 13:39:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:39:52.911941879 +0000 UTC m=+1305.351066369" watchObservedRunningTime="2025-11-24 13:39:52.938135069 +0000 UTC m=+1305.377259569" Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.011872 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=2.174725881 podStartE2EDuration="14.011853162s" podCreationTimestamp="2025-11-24 13:39:39 +0000 UTC" firstStartedPulling="2025-11-24 13:39:40.418325389 +0000 UTC m=+1292.857449889" lastFinishedPulling="2025-11-24 13:39:52.25545267 +0000 UTC m=+1304.694577170" observedRunningTime="2025-11-24 13:39:52.999016529 +0000 UTC m=+1305.438141029" watchObservedRunningTime="2025-11-24 13:39:53.011853162 +0000 UTC m=+1305.450977662" Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.501959 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55fff446b9-kw8fc" Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.648860 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-dns-swift-storage-0\") pod \"24c51914-29d4-40bb-b6a5-e14aac592ce3\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.648928 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-config\") pod \"24c51914-29d4-40bb-b6a5-e14aac592ce3\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.648963 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-ovsdbserver-nb\") pod \"24c51914-29d4-40bb-b6a5-e14aac592ce3\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.649104 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-ovsdbserver-sb\") pod \"24c51914-29d4-40bb-b6a5-e14aac592ce3\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.649233 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-dns-svc\") pod \"24c51914-29d4-40bb-b6a5-e14aac592ce3\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.649314 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4k8f8\" (UniqueName: \"kubernetes.io/projected/24c51914-29d4-40bb-b6a5-e14aac592ce3-kube-api-access-4k8f8\") pod \"24c51914-29d4-40bb-b6a5-e14aac592ce3\" (UID: \"24c51914-29d4-40bb-b6a5-e14aac592ce3\") " Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.665703 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24c51914-29d4-40bb-b6a5-e14aac592ce3-kube-api-access-4k8f8" (OuterVolumeSpecName: "kube-api-access-4k8f8") pod "24c51914-29d4-40bb-b6a5-e14aac592ce3" (UID: "24c51914-29d4-40bb-b6a5-e14aac592ce3"). InnerVolumeSpecName "kube-api-access-4k8f8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.702262 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "24c51914-29d4-40bb-b6a5-e14aac592ce3" (UID: "24c51914-29d4-40bb-b6a5-e14aac592ce3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.717610 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "24c51914-29d4-40bb-b6a5-e14aac592ce3" (UID: "24c51914-29d4-40bb-b6a5-e14aac592ce3"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.720887 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "24c51914-29d4-40bb-b6a5-e14aac592ce3" (UID: "24c51914-29d4-40bb-b6a5-e14aac592ce3"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.739489 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-config" (OuterVolumeSpecName: "config") pod "24c51914-29d4-40bb-b6a5-e14aac592ce3" (UID: "24c51914-29d4-40bb-b6a5-e14aac592ce3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.747590 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "24c51914-29d4-40bb-b6a5-e14aac592ce3" (UID: "24c51914-29d4-40bb-b6a5-e14aac592ce3"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.751780 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4k8f8\" (UniqueName: \"kubernetes.io/projected/24c51914-29d4-40bb-b6a5-e14aac592ce3-kube-api-access-4k8f8\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.751815 5039 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.751826 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.751836 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.751849 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.751861 5039 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/24c51914-29d4-40bb-b6a5-e14aac592ce3-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.967581 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" event={"ID":"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21","Type":"ContainerStarted","Data":"e3785e5f236852810dbb5f0ccced6212d751b5f6809673aa541679fde30ab67c"} Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.967698 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.990776 5039 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/dnsmasq-dns-55fff446b9-kw8fc" event={"ID":"24c51914-29d4-40bb-b6a5-e14aac592ce3","Type":"ContainerDied","Data":"e4594a97e38908aaad1ad6c1e61e391ef55571c4d8ee13e294d4458bd5617f4c"} Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.990838 5039 scope.go:117] "RemoveContainer" containerID="d992bbc96521a9e5c9b7b7f7f23a8487a5556929fe20803c7add0c2337c11ee1" Nov 24 13:39:53 crc kubenswrapper[5039]: I1124 13:39:53.991006 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55fff446b9-kw8fc" Nov 24 13:39:54 crc kubenswrapper[5039]: I1124 13:39:54.012469 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" podStartSLOduration=11.012450208 podStartE2EDuration="11.012450208s" podCreationTimestamp="2025-11-24 13:39:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:39:53.991625448 +0000 UTC m=+1306.430749958" watchObservedRunningTime="2025-11-24 13:39:54.012450208 +0000 UTC m=+1306.451574708" Nov 24 13:39:54 crc kubenswrapper[5039]: I1124 13:39:54.091810 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-kw8fc"] Nov 24 13:39:54 crc kubenswrapper[5039]: I1124 13:39:54.105206 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-kw8fc"] Nov 24 13:39:54 crc kubenswrapper[5039]: I1124 13:39:54.323893 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24c51914-29d4-40bb-b6a5-e14aac592ce3" path="/var/lib/kubelet/pods/24c51914-29d4-40bb-b6a5-e14aac592ce3/volumes" Nov 24 13:39:56 crc kubenswrapper[5039]: I1124 13:39:56.623215 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:56 crc kubenswrapper[5039]: I1124 13:39:56.631626 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:57 crc kubenswrapper[5039]: I1124 13:39:57.022621 5039 generic.go:334] "Generic (PLEG): container finished" podID="92c678de-53b6-450f-a106-3ec37705ea3b" containerID="b9b4a336c29ad683550561fdb764ff313aac831a347ffd5f562bebbebe1c2365" exitCode=0 Nov 24 13:39:57 crc kubenswrapper[5039]: I1124 13:39:57.022710 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-n7h6g" event={"ID":"92c678de-53b6-450f-a106-3ec37705ea3b","Type":"ContainerDied","Data":"b9b4a336c29ad683550561fdb764ff313aac831a347ffd5f562bebbebe1c2365"} Nov 24 13:39:57 crc kubenswrapper[5039]: I1124 13:39:57.038681 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 24 13:39:58 crc kubenswrapper[5039]: I1124 13:39:58.957848 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:39:59 crc kubenswrapper[5039]: I1124 13:39:59.015518 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-5t2p6"] Nov 24 13:39:59 crc kubenswrapper[5039]: I1124 13:39:59.015813 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" podUID="938759c5-f8df-4087-a815-e6346ce7de38" containerName="dnsmasq-dns" containerID="cri-o://08a5e493313c144245266dfd6658b8d5cd3a02f6f76b837738c7a8ea96006770" gracePeriod=10 Nov 24 
13:39:59 crc kubenswrapper[5039]: I1124 13:39:59.031826 5039 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod236b7551-4b7b-4643-afe6-0bb78c880b3b"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod236b7551-4b7b-4643-afe6-0bb78c880b3b] : Timed out while waiting for systemd to remove kubepods-besteffort-pod236b7551_4b7b_4643_afe6_0bb78c880b3b.slice" Nov 24 13:39:59 crc kubenswrapper[5039]: E1124 13:39:59.031873 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod236b7551-4b7b-4643-afe6-0bb78c880b3b] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod236b7551-4b7b-4643-afe6-0bb78c880b3b] : Timed out while waiting for systemd to remove kubepods-besteffort-pod236b7551_4b7b_4643_afe6_0bb78c880b3b.slice" pod="openstack/mysqld-exporter-openstack-db-create-d56m7" podUID="236b7551-4b7b-4643-afe6-0bb78c880b3b" Nov 24 13:39:59 crc kubenswrapper[5039]: I1124 13:39:59.470785 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" podUID="938759c5-f8df-4087-a815-e6346ce7de38" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.163:5353: connect: connection refused" Nov 24 13:40:00 crc kubenswrapper[5039]: I1124 13:40:00.065800 5039 generic.go:334] "Generic (PLEG): container finished" podID="938759c5-f8df-4087-a815-e6346ce7de38" containerID="08a5e493313c144245266dfd6658b8d5cd3a02f6f76b837738c7a8ea96006770" exitCode=0 Nov 24 13:40:00 crc kubenswrapper[5039]: I1124 13:40:00.065875 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" event={"ID":"938759c5-f8df-4087-a815-e6346ce7de38","Type":"ContainerDied","Data":"08a5e493313c144245266dfd6658b8d5cd3a02f6f76b837738c7a8ea96006770"} Nov 24 13:40:00 crc kubenswrapper[5039]: I1124 13:40:00.066093 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-d56m7" Nov 24 13:40:04 crc kubenswrapper[5039]: I1124 13:40:04.470838 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" podUID="938759c5-f8df-4087-a815-e6346ce7de38" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.163:5353: connect: connection refused" Nov 24 13:40:07 crc kubenswrapper[5039]: E1124 13:40:07.147646 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Nov 24 13:40:07 crc kubenswrapper[5039]: E1124 13:40:07.148104 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kxhvh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-dvmqw_openstack(1577e002-5267-48f4-b292-158ebed8410c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 13:40:07 crc kubenswrapper[5039]: E1124 13:40:07.149423 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-dvmqw" podUID="1577e002-5267-48f4-b292-158ebed8410c" Nov 24 13:40:07 crc kubenswrapper[5039]: I1124 13:40:07.303350 5039 util.go:48] "No ready sandbox 
for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-n7h6g" Nov 24 13:40:07 crc kubenswrapper[5039]: I1124 13:40:07.343621 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-combined-ca-bundle\") pod \"92c678de-53b6-450f-a106-3ec37705ea3b\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " Nov 24 13:40:07 crc kubenswrapper[5039]: I1124 13:40:07.343750 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-fernet-keys\") pod \"92c678de-53b6-450f-a106-3ec37705ea3b\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " Nov 24 13:40:07 crc kubenswrapper[5039]: I1124 13:40:07.343808 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-credential-keys\") pod \"92c678de-53b6-450f-a106-3ec37705ea3b\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " Nov 24 13:40:07 crc kubenswrapper[5039]: I1124 13:40:07.344686 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rknfk\" (UniqueName: \"kubernetes.io/projected/92c678de-53b6-450f-a106-3ec37705ea3b-kube-api-access-rknfk\") pod \"92c678de-53b6-450f-a106-3ec37705ea3b\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " Nov 24 13:40:07 crc kubenswrapper[5039]: I1124 13:40:07.344732 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-scripts\") pod \"92c678de-53b6-450f-a106-3ec37705ea3b\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " Nov 24 13:40:07 crc kubenswrapper[5039]: I1124 13:40:07.344751 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-config-data\") pod \"92c678de-53b6-450f-a106-3ec37705ea3b\" (UID: \"92c678de-53b6-450f-a106-3ec37705ea3b\") " Nov 24 13:40:07 crc kubenswrapper[5039]: I1124 13:40:07.350230 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "92c678de-53b6-450f-a106-3ec37705ea3b" (UID: "92c678de-53b6-450f-a106-3ec37705ea3b"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:07 crc kubenswrapper[5039]: I1124 13:40:07.351350 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "92c678de-53b6-450f-a106-3ec37705ea3b" (UID: "92c678de-53b6-450f-a106-3ec37705ea3b"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:07 crc kubenswrapper[5039]: I1124 13:40:07.353722 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92c678de-53b6-450f-a106-3ec37705ea3b-kube-api-access-rknfk" (OuterVolumeSpecName: "kube-api-access-rknfk") pod "92c678de-53b6-450f-a106-3ec37705ea3b" (UID: "92c678de-53b6-450f-a106-3ec37705ea3b"). InnerVolumeSpecName "kube-api-access-rknfk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:40:07 crc kubenswrapper[5039]: I1124 13:40:07.360405 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-scripts" (OuterVolumeSpecName: "scripts") pod "92c678de-53b6-450f-a106-3ec37705ea3b" (UID: "92c678de-53b6-450f-a106-3ec37705ea3b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:07 crc kubenswrapper[5039]: I1124 13:40:07.382305 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "92c678de-53b6-450f-a106-3ec37705ea3b" (UID: "92c678de-53b6-450f-a106-3ec37705ea3b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:07 crc kubenswrapper[5039]: I1124 13:40:07.382442 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-config-data" (OuterVolumeSpecName: "config-data") pod "92c678de-53b6-450f-a106-3ec37705ea3b" (UID: "92c678de-53b6-450f-a106-3ec37705ea3b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:07 crc kubenswrapper[5039]: I1124 13:40:07.447026 5039 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:07 crc kubenswrapper[5039]: I1124 13:40:07.447059 5039 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:07 crc kubenswrapper[5039]: I1124 13:40:07.447071 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rknfk\" (UniqueName: \"kubernetes.io/projected/92c678de-53b6-450f-a106-3ec37705ea3b-kube-api-access-rknfk\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:07 crc kubenswrapper[5039]: I1124 13:40:07.447079 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:07 crc kubenswrapper[5039]: I1124 13:40:07.447089 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:07 crc kubenswrapper[5039]: I1124 13:40:07.447097 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92c678de-53b6-450f-a106-3ec37705ea3b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.163874 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-n7h6g" event={"ID":"92c678de-53b6-450f-a106-3ec37705ea3b","Type":"ContainerDied","Data":"14d94ef9b2f09d3bd480f5b846ee6fd48a7dd6c8d1e0fc5aac2452b651eb3796"} Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.163959 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="14d94ef9b2f09d3bd480f5b846ee6fd48a7dd6c8d1e0fc5aac2452b651eb3796" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.163914 5039 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-n7h6g" Nov 24 13:40:08 crc kubenswrapper[5039]: E1124 13:40:08.171907 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-dvmqw" podUID="1577e002-5267-48f4-b292-158ebed8410c" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.387852 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-n7h6g"] Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.396094 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-n7h6g"] Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.498251 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-hmshf"] Nov 24 13:40:08 crc kubenswrapper[5039]: E1124 13:40:08.498697 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92c678de-53b6-450f-a106-3ec37705ea3b" containerName="keystone-bootstrap" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.498715 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="92c678de-53b6-450f-a106-3ec37705ea3b" containerName="keystone-bootstrap" Nov 24 13:40:08 crc kubenswrapper[5039]: E1124 13:40:08.498727 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24c51914-29d4-40bb-b6a5-e14aac592ce3" containerName="init" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.498734 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="24c51914-29d4-40bb-b6a5-e14aac592ce3" containerName="init" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.498933 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="92c678de-53b6-450f-a106-3ec37705ea3b" containerName="keystone-bootstrap" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.498946 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="24c51914-29d4-40bb-b6a5-e14aac592ce3" containerName="init" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.499593 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.501806 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.502128 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.502336 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.502450 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-jsx5z" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.504386 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.510266 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-hmshf"] Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.589131 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flxlr\" (UniqueName: \"kubernetes.io/projected/6a1e2c3e-f294-40f6-9b98-5fd16034f145-kube-api-access-flxlr\") pod \"keystone-bootstrap-hmshf\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.589175 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-fernet-keys\") pod \"keystone-bootstrap-hmshf\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.589282 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-credential-keys\") pod \"keystone-bootstrap-hmshf\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.589315 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-combined-ca-bundle\") pod \"keystone-bootstrap-hmshf\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.589337 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-config-data\") pod \"keystone-bootstrap-hmshf\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.589448 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-scripts\") pod \"keystone-bootstrap-hmshf\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.691348 5039 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-credential-keys\") pod \"keystone-bootstrap-hmshf\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.691397 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-combined-ca-bundle\") pod \"keystone-bootstrap-hmshf\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.691417 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-config-data\") pod \"keystone-bootstrap-hmshf\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.691459 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-scripts\") pod \"keystone-bootstrap-hmshf\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.691549 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flxlr\" (UniqueName: \"kubernetes.io/projected/6a1e2c3e-f294-40f6-9b98-5fd16034f145-kube-api-access-flxlr\") pod \"keystone-bootstrap-hmshf\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.691571 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-fernet-keys\") pod \"keystone-bootstrap-hmshf\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.695814 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-combined-ca-bundle\") pod \"keystone-bootstrap-hmshf\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.698071 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-scripts\") pod \"keystone-bootstrap-hmshf\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.698687 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-config-data\") pod \"keystone-bootstrap-hmshf\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.699057 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-fernet-keys\") pod \"keystone-bootstrap-hmshf\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") 
" pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.703053 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-credential-keys\") pod \"keystone-bootstrap-hmshf\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.709852 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flxlr\" (UniqueName: \"kubernetes.io/projected/6a1e2c3e-f294-40f6-9b98-5fd16034f145-kube-api-access-flxlr\") pod \"keystone-bootstrap-hmshf\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:08 crc kubenswrapper[5039]: I1124 13:40:08.818215 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:09 crc kubenswrapper[5039]: I1124 13:40:09.470815 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" podUID="938759c5-f8df-4087-a815-e6346ce7de38" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.163:5353: connect: connection refused" Nov 24 13:40:09 crc kubenswrapper[5039]: I1124 13:40:09.471134 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:40:10 crc kubenswrapper[5039]: I1124 13:40:10.323189 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92c678de-53b6-450f-a106-3ec37705ea3b" path="/var/lib/kubelet/pods/92c678de-53b6-450f-a106-3ec37705ea3b/volumes" Nov 24 13:40:14 crc kubenswrapper[5039]: I1124 13:40:14.471190 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" podUID="938759c5-f8df-4087-a815-e6346ce7de38" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.163:5353: connect: connection refused" Nov 24 13:40:19 crc kubenswrapper[5039]: I1124 13:40:19.471431 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" podUID="938759c5-f8df-4087-a815-e6346ce7de38" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.163:5353: connect: connection refused" Nov 24 13:40:24 crc kubenswrapper[5039]: I1124 13:40:24.471208 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" podUID="938759c5-f8df-4087-a815-e6346ce7de38" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.163:5353: connect: connection refused" Nov 24 13:40:29 crc kubenswrapper[5039]: E1124 13:40:29.113863 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 24 13:40:29 crc kubenswrapper[5039]: E1124 13:40:29.115110 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db 
upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dhlfq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-x49zw_openstack(4ab794c0-2264-4041-b697-ef7829a5129a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 13:40:29 crc kubenswrapper[5039]: E1124 13:40:29.116342 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-x49zw" podUID="4ab794c0-2264-4041-b697-ef7829a5129a" Nov 24 13:40:29 crc kubenswrapper[5039]: E1124 13:40:29.396003 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-x49zw" podUID="4ab794c0-2264-4041-b697-ef7829a5129a" Nov 24 13:40:29 crc kubenswrapper[5039]: E1124 13:40:29.537558 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified" Nov 24 13:40:29 crc kubenswrapper[5039]: E1124 13:40:29.538002 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d 
db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-w5smd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-qvrwf_openstack(7c0313ce-4944-4fad-bce0-47d60b273f69): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 13:40:29 crc kubenswrapper[5039]: E1124 13:40:29.539253 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-qvrwf" podUID="7c0313ce-4944-4fad-bce0-47d60b273f69" Nov 24 13:40:30 crc kubenswrapper[5039]: E1124 13:40:30.406986 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified\\\"\"" pod="openstack/heat-db-sync-qvrwf" podUID="7c0313ce-4944-4fad-bce0-47d60b273f69" Nov 24 13:40:30 crc kubenswrapper[5039]: E1124 13:40:30.660429 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 24 13:40:30 crc kubenswrapper[5039]: E1124 13:40:30.660957 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gxkfs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-47dzd_openstack(cb2d453a-99e6-4593-ad2d-a57c7a2c2519): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 13:40:30 crc kubenswrapper[5039]: E1124 13:40:30.662154 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-47dzd" podUID="cb2d453a-99e6-4593-ad2d-a57c7a2c2519" Nov 24 13:40:30 crc kubenswrapper[5039]: I1124 13:40:30.794982 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:40:30 crc kubenswrapper[5039]: I1124 13:40:30.852800 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-dns-swift-storage-0\") pod \"938759c5-f8df-4087-a815-e6346ce7de38\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " Nov 24 13:40:30 crc kubenswrapper[5039]: I1124 13:40:30.852852 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-config\") pod \"938759c5-f8df-4087-a815-e6346ce7de38\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " Nov 24 13:40:30 crc kubenswrapper[5039]: I1124 13:40:30.852955 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vwg59\" (UniqueName: \"kubernetes.io/projected/938759c5-f8df-4087-a815-e6346ce7de38-kube-api-access-vwg59\") pod \"938759c5-f8df-4087-a815-e6346ce7de38\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " Nov 24 13:40:30 crc kubenswrapper[5039]: I1124 13:40:30.852980 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-ovsdbserver-sb\") pod \"938759c5-f8df-4087-a815-e6346ce7de38\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " Nov 24 13:40:30 crc kubenswrapper[5039]: I1124 13:40:30.853089 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-ovsdbserver-nb\") pod \"938759c5-f8df-4087-a815-e6346ce7de38\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " Nov 24 13:40:30 crc kubenswrapper[5039]: I1124 13:40:30.853132 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-dns-svc\") pod \"938759c5-f8df-4087-a815-e6346ce7de38\" (UID: \"938759c5-f8df-4087-a815-e6346ce7de38\") " Nov 24 13:40:30 crc kubenswrapper[5039]: I1124 13:40:30.902161 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/938759c5-f8df-4087-a815-e6346ce7de38-kube-api-access-vwg59" (OuterVolumeSpecName: "kube-api-access-vwg59") pod "938759c5-f8df-4087-a815-e6346ce7de38" (UID: "938759c5-f8df-4087-a815-e6346ce7de38"). InnerVolumeSpecName "kube-api-access-vwg59". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:40:30 crc kubenswrapper[5039]: I1124 13:40:30.953278 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "938759c5-f8df-4087-a815-e6346ce7de38" (UID: "938759c5-f8df-4087-a815-e6346ce7de38"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:40:30 crc kubenswrapper[5039]: I1124 13:40:30.953780 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "938759c5-f8df-4087-a815-e6346ce7de38" (UID: "938759c5-f8df-4087-a815-e6346ce7de38"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:40:30 crc kubenswrapper[5039]: I1124 13:40:30.959618 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:30 crc kubenswrapper[5039]: I1124 13:40:30.959687 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vwg59\" (UniqueName: \"kubernetes.io/projected/938759c5-f8df-4087-a815-e6346ce7de38-kube-api-access-vwg59\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:30 crc kubenswrapper[5039]: I1124 13:40:30.959698 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:30 crc kubenswrapper[5039]: I1124 13:40:30.983128 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "938759c5-f8df-4087-a815-e6346ce7de38" (UID: "938759c5-f8df-4087-a815-e6346ce7de38"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:40:30 crc kubenswrapper[5039]: I1124 13:40:30.989872 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-config" (OuterVolumeSpecName: "config") pod "938759c5-f8df-4087-a815-e6346ce7de38" (UID: "938759c5-f8df-4087-a815-e6346ce7de38"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:40:30 crc kubenswrapper[5039]: I1124 13:40:30.995090 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "938759c5-f8df-4087-a815-e6346ce7de38" (UID: "938759c5-f8df-4087-a815-e6346ce7de38"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:40:31 crc kubenswrapper[5039]: I1124 13:40:31.063169 5039 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:31 crc kubenswrapper[5039]: I1124 13:40:31.063201 5039 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:31 crc kubenswrapper[5039]: I1124 13:40:31.063217 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/938759c5-f8df-4087-a815-e6346ce7de38-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:31 crc kubenswrapper[5039]: I1124 13:40:31.236236 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-hmshf"] Nov 24 13:40:31 crc kubenswrapper[5039]: W1124 13:40:31.244813 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6a1e2c3e_f294_40f6_9b98_5fd16034f145.slice/crio-b3f2cbe1301c38c14e44c75b8d657698f220398a73876fe1a1b579a92626d093 WatchSource:0}: Error finding container b3f2cbe1301c38c14e44c75b8d657698f220398a73876fe1a1b579a92626d093: Status 404 returned error can't find the container with id b3f2cbe1301c38c14e44c75b8d657698f220398a73876fe1a1b579a92626d093 Nov 24 13:40:31 crc kubenswrapper[5039]: I1124 13:40:31.418182 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-dvmqw" event={"ID":"1577e002-5267-48f4-b292-158ebed8410c","Type":"ContainerStarted","Data":"baa7abd18ef86b6add604ce2f46066004e539d770256de57e129ebb1f3aa059a"} Nov 24 13:40:31 crc kubenswrapper[5039]: I1124 13:40:31.421682 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hmshf" event={"ID":"6a1e2c3e-f294-40f6-9b98-5fd16034f145","Type":"ContainerStarted","Data":"b3f2cbe1301c38c14e44c75b8d657698f220398a73876fe1a1b579a92626d093"} Nov 24 13:40:31 crc kubenswrapper[5039]: I1124 13:40:31.424354 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" event={"ID":"938759c5-f8df-4087-a815-e6346ce7de38","Type":"ContainerDied","Data":"9615e4f79753cd8ae1a15cd8e1b7ecff89ba9a4876fc351ba062c5fe74f297f6"} Nov 24 13:40:31 crc kubenswrapper[5039]: I1124 13:40:31.424418 5039 scope.go:117] "RemoveContainer" containerID="08a5e493313c144245266dfd6658b8d5cd3a02f6f76b837738c7a8ea96006770" Nov 24 13:40:31 crc kubenswrapper[5039]: I1124 13:40:31.424368 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" Nov 24 13:40:31 crc kubenswrapper[5039]: I1124 13:40:31.445152 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"158b7bf7-1207-4509-bb3f-d666847eb59d","Type":"ContainerStarted","Data":"50b846d0dd3fa1f7b3ebc5917ca54f611564534f5d4050a457263515aeadf570"} Nov 24 13:40:31 crc kubenswrapper[5039]: I1124 13:40:31.448241 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-dvmqw" podStartSLOduration=9.857697593 podStartE2EDuration="48.448220377s" podCreationTimestamp="2025-11-24 13:39:43 +0000 UTC" firstStartedPulling="2025-11-24 13:39:52.186540955 +0000 UTC m=+1304.625665455" lastFinishedPulling="2025-11-24 13:40:30.777063739 +0000 UTC m=+1343.216188239" observedRunningTime="2025-11-24 13:40:31.440141319 +0000 UTC m=+1343.879265829" watchObservedRunningTime="2025-11-24 13:40:31.448220377 +0000 UTC m=+1343.887344877" Nov 24 13:40:31 crc kubenswrapper[5039]: E1124 13:40:31.456108 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-47dzd" podUID="cb2d453a-99e6-4593-ad2d-a57c7a2c2519" Nov 24 13:40:31 crc kubenswrapper[5039]: I1124 13:40:31.456478 5039 scope.go:117] "RemoveContainer" containerID="d1633796936d0d4a5f1281754580cc7980ef3250d6fe9c3557d65c8db10051ce" Nov 24 13:40:31 crc kubenswrapper[5039]: I1124 13:40:31.472376 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-5t2p6"] Nov 24 13:40:31 crc kubenswrapper[5039]: I1124 13:40:31.492442 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-5t2p6"] Nov 24 13:40:32 crc kubenswrapper[5039]: I1124 13:40:32.320449 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="938759c5-f8df-4087-a815-e6346ce7de38" path="/var/lib/kubelet/pods/938759c5-f8df-4087-a815-e6346ce7de38/volumes" Nov 24 13:40:32 crc kubenswrapper[5039]: I1124 13:40:32.457247 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-vvjsm" event={"ID":"2c22dae7-e545-4eb0-9552-f3c691f397df","Type":"ContainerStarted","Data":"91f775a9e48154d99b12708d5794be3c4a30d39b15993c9d325891276817ad85"} Nov 24 13:40:32 crc kubenswrapper[5039]: I1124 13:40:32.458735 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hmshf" event={"ID":"6a1e2c3e-f294-40f6-9b98-5fd16034f145","Type":"ContainerStarted","Data":"19b11a9f9da4658334a3496647f8f469118a8cfc8fb444799ab27ff258767d7a"} Nov 24 13:40:32 crc kubenswrapper[5039]: I1124 13:40:32.476192 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-vvjsm" podStartSLOduration=3.647928866 podStartE2EDuration="1m1.47617462s" podCreationTimestamp="2025-11-24 13:39:31 +0000 UTC" firstStartedPulling="2025-11-24 13:39:32.945021383 +0000 UTC m=+1285.384145883" lastFinishedPulling="2025-11-24 13:40:30.773267137 +0000 UTC m=+1343.212391637" observedRunningTime="2025-11-24 13:40:32.471546427 +0000 UTC m=+1344.910670927" watchObservedRunningTime="2025-11-24 13:40:32.47617462 +0000 UTC m=+1344.915299120" Nov 24 13:40:32 crc kubenswrapper[5039]: I1124 13:40:32.496788 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-hmshf" 
podStartSLOduration=24.496767274 podStartE2EDuration="24.496767274s" podCreationTimestamp="2025-11-24 13:40:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:40:32.491020014 +0000 UTC m=+1344.930144514" watchObservedRunningTime="2025-11-24 13:40:32.496767274 +0000 UTC m=+1344.935891774" Nov 24 13:40:33 crc kubenswrapper[5039]: I1124 13:40:33.470999 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"158b7bf7-1207-4509-bb3f-d666847eb59d","Type":"ContainerStarted","Data":"8fc8f7a586d558f2f11d83f27837cff6495aeae1ae5fbea7c4dca0f303501b07"} Nov 24 13:40:34 crc kubenswrapper[5039]: I1124 13:40:34.471688 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-5t2p6" podUID="938759c5-f8df-4087-a815-e6346ce7de38" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.163:5353: i/o timeout" Nov 24 13:40:36 crc kubenswrapper[5039]: I1124 13:40:36.505109 5039 generic.go:334] "Generic (PLEG): container finished" podID="1577e002-5267-48f4-b292-158ebed8410c" containerID="baa7abd18ef86b6add604ce2f46066004e539d770256de57e129ebb1f3aa059a" exitCode=0 Nov 24 13:40:36 crc kubenswrapper[5039]: I1124 13:40:36.505681 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-dvmqw" event={"ID":"1577e002-5267-48f4-b292-158ebed8410c","Type":"ContainerDied","Data":"baa7abd18ef86b6add604ce2f46066004e539d770256de57e129ebb1f3aa059a"} Nov 24 13:40:36 crc kubenswrapper[5039]: I1124 13:40:36.508247 5039 generic.go:334] "Generic (PLEG): container finished" podID="6a1e2c3e-f294-40f6-9b98-5fd16034f145" containerID="19b11a9f9da4658334a3496647f8f469118a8cfc8fb444799ab27ff258767d7a" exitCode=0 Nov 24 13:40:36 crc kubenswrapper[5039]: I1124 13:40:36.508327 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hmshf" event={"ID":"6a1e2c3e-f294-40f6-9b98-5fd16034f145","Type":"ContainerDied","Data":"19b11a9f9da4658334a3496647f8f469118a8cfc8fb444799ab27ff258767d7a"} Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.749410 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-dvmqw" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.752186 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.872569 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kxhvh\" (UniqueName: \"kubernetes.io/projected/1577e002-5267-48f4-b292-158ebed8410c-kube-api-access-kxhvh\") pod \"1577e002-5267-48f4-b292-158ebed8410c\" (UID: \"1577e002-5267-48f4-b292-158ebed8410c\") " Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.872635 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1577e002-5267-48f4-b292-158ebed8410c-combined-ca-bundle\") pod \"1577e002-5267-48f4-b292-158ebed8410c\" (UID: \"1577e002-5267-48f4-b292-158ebed8410c\") " Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.872663 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1577e002-5267-48f4-b292-158ebed8410c-logs\") pod \"1577e002-5267-48f4-b292-158ebed8410c\" (UID: \"1577e002-5267-48f4-b292-158ebed8410c\") " Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.872720 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1577e002-5267-48f4-b292-158ebed8410c-scripts\") pod \"1577e002-5267-48f4-b292-158ebed8410c\" (UID: \"1577e002-5267-48f4-b292-158ebed8410c\") " Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.872746 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-config-data\") pod \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.872777 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-scripts\") pod \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.872901 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1577e002-5267-48f4-b292-158ebed8410c-config-data\") pod \"1577e002-5267-48f4-b292-158ebed8410c\" (UID: \"1577e002-5267-48f4-b292-158ebed8410c\") " Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.872930 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-flxlr\" (UniqueName: \"kubernetes.io/projected/6a1e2c3e-f294-40f6-9b98-5fd16034f145-kube-api-access-flxlr\") pod \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.872947 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-credential-keys\") pod \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.872983 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-fernet-keys\") pod \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") 
" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.873023 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-combined-ca-bundle\") pod \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\" (UID: \"6a1e2c3e-f294-40f6-9b98-5fd16034f145\") " Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.873592 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1577e002-5267-48f4-b292-158ebed8410c-logs" (OuterVolumeSpecName: "logs") pod "1577e002-5267-48f4-b292-158ebed8410c" (UID: "1577e002-5267-48f4-b292-158ebed8410c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.882891 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "6a1e2c3e-f294-40f6-9b98-5fd16034f145" (UID: "6a1e2c3e-f294-40f6-9b98-5fd16034f145"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.882890 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1577e002-5267-48f4-b292-158ebed8410c-scripts" (OuterVolumeSpecName: "scripts") pod "1577e002-5267-48f4-b292-158ebed8410c" (UID: "1577e002-5267-48f4-b292-158ebed8410c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.882943 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1577e002-5267-48f4-b292-158ebed8410c-kube-api-access-kxhvh" (OuterVolumeSpecName: "kube-api-access-kxhvh") pod "1577e002-5267-48f4-b292-158ebed8410c" (UID: "1577e002-5267-48f4-b292-158ebed8410c"). InnerVolumeSpecName "kube-api-access-kxhvh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.883719 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a1e2c3e-f294-40f6-9b98-5fd16034f145-kube-api-access-flxlr" (OuterVolumeSpecName: "kube-api-access-flxlr") pod "6a1e2c3e-f294-40f6-9b98-5fd16034f145" (UID: "6a1e2c3e-f294-40f6-9b98-5fd16034f145"). InnerVolumeSpecName "kube-api-access-flxlr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.885360 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-scripts" (OuterVolumeSpecName: "scripts") pod "6a1e2c3e-f294-40f6-9b98-5fd16034f145" (UID: "6a1e2c3e-f294-40f6-9b98-5fd16034f145"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.886828 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "6a1e2c3e-f294-40f6-9b98-5fd16034f145" (UID: "6a1e2c3e-f294-40f6-9b98-5fd16034f145"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.904977 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6a1e2c3e-f294-40f6-9b98-5fd16034f145" (UID: "6a1e2c3e-f294-40f6-9b98-5fd16034f145"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.909164 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1577e002-5267-48f4-b292-158ebed8410c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1577e002-5267-48f4-b292-158ebed8410c" (UID: "1577e002-5267-48f4-b292-158ebed8410c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.913092 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1577e002-5267-48f4-b292-158ebed8410c-config-data" (OuterVolumeSpecName: "config-data") pod "1577e002-5267-48f4-b292-158ebed8410c" (UID: "1577e002-5267-48f4-b292-158ebed8410c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.921018 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-config-data" (OuterVolumeSpecName: "config-data") pod "6a1e2c3e-f294-40f6-9b98-5fd16034f145" (UID: "6a1e2c3e-f294-40f6-9b98-5fd16034f145"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.975612 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1577e002-5267-48f4-b292-158ebed8410c-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.975643 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.975653 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.975662 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1577e002-5267-48f4-b292-158ebed8410c-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.975671 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-flxlr\" (UniqueName: \"kubernetes.io/projected/6a1e2c3e-f294-40f6-9b98-5fd16034f145-kube-api-access-flxlr\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.975681 5039 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.975689 5039 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: 
\"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.975698 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a1e2c3e-f294-40f6-9b98-5fd16034f145-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.975706 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kxhvh\" (UniqueName: \"kubernetes.io/projected/1577e002-5267-48f4-b292-158ebed8410c-kube-api-access-kxhvh\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.975714 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1577e002-5267-48f4-b292-158ebed8410c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:39 crc kubenswrapper[5039]: I1124 13:40:39.976621 5039 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1577e002-5267-48f4-b292-158ebed8410c-logs\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.558558 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hmshf" event={"ID":"6a1e2c3e-f294-40f6-9b98-5fd16034f145","Type":"ContainerDied","Data":"b3f2cbe1301c38c14e44c75b8d657698f220398a73876fe1a1b579a92626d093"} Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.558613 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3f2cbe1301c38c14e44c75b8d657698f220398a73876fe1a1b579a92626d093" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.558700 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-hmshf" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.564090 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-dvmqw" event={"ID":"1577e002-5267-48f4-b292-158ebed8410c","Type":"ContainerDied","Data":"0d13c32a93f999586434018e372d3fcd393db197362227457049e302ab95b74a"} Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.564142 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0d13c32a93f999586434018e372d3fcd393db197362227457049e302ab95b74a" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.564263 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-dvmqw" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.936236 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-576959578d-mb556"] Nov 24 13:40:40 crc kubenswrapper[5039]: E1124 13:40:40.936867 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a1e2c3e-f294-40f6-9b98-5fd16034f145" containerName="keystone-bootstrap" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.936882 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a1e2c3e-f294-40f6-9b98-5fd16034f145" containerName="keystone-bootstrap" Nov 24 13:40:40 crc kubenswrapper[5039]: E1124 13:40:40.936897 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="938759c5-f8df-4087-a815-e6346ce7de38" containerName="dnsmasq-dns" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.936904 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="938759c5-f8df-4087-a815-e6346ce7de38" containerName="dnsmasq-dns" Nov 24 13:40:40 crc kubenswrapper[5039]: E1124 13:40:40.936921 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="938759c5-f8df-4087-a815-e6346ce7de38" containerName="init" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.936927 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="938759c5-f8df-4087-a815-e6346ce7de38" containerName="init" Nov 24 13:40:40 crc kubenswrapper[5039]: E1124 13:40:40.936953 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1577e002-5267-48f4-b292-158ebed8410c" containerName="placement-db-sync" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.936959 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="1577e002-5267-48f4-b292-158ebed8410c" containerName="placement-db-sync" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.937116 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a1e2c3e-f294-40f6-9b98-5fd16034f145" containerName="keystone-bootstrap" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.937131 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="938759c5-f8df-4087-a815-e6346ce7de38" containerName="dnsmasq-dns" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.937139 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="1577e002-5267-48f4-b292-158ebed8410c" containerName="placement-db-sync" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.937936 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.949889 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.950194 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.950241 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-jsx5z" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.950384 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.950648 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.950676 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.956528 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-66cb4657dd-z97bx"] Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.958866 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.963066 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.963423 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.963596 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.963812 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.963952 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-78ffd" Nov 24 13:40:40 crc kubenswrapper[5039]: I1124 13:40:40.984425 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-576959578d-mb556"] Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.015000 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-66cb4657dd-z97bx"] Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.096578 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7cd31c1b-3250-444a-a717-88349d2c57a0-logs\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.096910 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-public-tls-certs\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.097037 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" 
(UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-credential-keys\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.097161 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-config-data\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.097242 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cd31c1b-3250-444a-a717-88349d2c57a0-public-tls-certs\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.097349 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-combined-ca-bundle\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.097457 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cd31c1b-3250-444a-a717-88349d2c57a0-combined-ca-bundle\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.097580 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-fernet-keys\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.097701 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-scripts\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.097907 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-internal-tls-certs\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.098006 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cd31c1b-3250-444a-a717-88349d2c57a0-internal-tls-certs\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.098050 5039 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxhx9\" (UniqueName: \"kubernetes.io/projected/a0a57e07-3e25-4329-9789-c3ff435860c3-kube-api-access-kxhx9\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.098120 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2fzg\" (UniqueName: \"kubernetes.io/projected/7cd31c1b-3250-444a-a717-88349d2c57a0-kube-api-access-t2fzg\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.098227 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7cd31c1b-3250-444a-a717-88349d2c57a0-scripts\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.098371 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cd31c1b-3250-444a-a717-88349d2c57a0-config-data\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.199687 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cd31c1b-3250-444a-a717-88349d2c57a0-config-data\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.200069 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7cd31c1b-3250-444a-a717-88349d2c57a0-logs\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.200220 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-public-tls-certs\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.200348 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-credential-keys\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.200494 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-config-data\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.200631 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cd31c1b-3250-444a-a717-88349d2c57a0-public-tls-certs\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.200781 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-combined-ca-bundle\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.200847 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7cd31c1b-3250-444a-a717-88349d2c57a0-logs\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.200930 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cd31c1b-3250-444a-a717-88349d2c57a0-combined-ca-bundle\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.201223 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-fernet-keys\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.201390 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-scripts\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.201539 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-internal-tls-certs\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.201707 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cd31c1b-3250-444a-a717-88349d2c57a0-internal-tls-certs\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.201855 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxhx9\" (UniqueName: \"kubernetes.io/projected/a0a57e07-3e25-4329-9789-c3ff435860c3-kube-api-access-kxhx9\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.201969 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2fzg\" (UniqueName: 
\"kubernetes.io/projected/7cd31c1b-3250-444a-a717-88349d2c57a0-kube-api-access-t2fzg\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.202153 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7cd31c1b-3250-444a-a717-88349d2c57a0-scripts\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.205183 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cd31c1b-3250-444a-a717-88349d2c57a0-combined-ca-bundle\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.205974 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7cd31c1b-3250-444a-a717-88349d2c57a0-scripts\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.207523 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cd31c1b-3250-444a-a717-88349d2c57a0-config-data\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.213469 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-public-tls-certs\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.213484 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-credential-keys\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.213802 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cd31c1b-3250-444a-a717-88349d2c57a0-public-tls-certs\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.213963 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-combined-ca-bundle\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.214137 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-internal-tls-certs\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " 
pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.214965 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-config-data\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.216408 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cd31c1b-3250-444a-a717-88349d2c57a0-internal-tls-certs\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.217341 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-scripts\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.219305 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a0a57e07-3e25-4329-9789-c3ff435860c3-fernet-keys\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.220131 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2fzg\" (UniqueName: \"kubernetes.io/projected/7cd31c1b-3250-444a-a717-88349d2c57a0-kube-api-access-t2fzg\") pod \"placement-66cb4657dd-z97bx\" (UID: \"7cd31c1b-3250-444a-a717-88349d2c57a0\") " pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.229984 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxhx9\" (UniqueName: \"kubernetes.io/projected/a0a57e07-3e25-4329-9789-c3ff435860c3-kube-api-access-kxhx9\") pod \"keystone-576959578d-mb556\" (UID: \"a0a57e07-3e25-4329-9789-c3ff435860c3\") " pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.280080 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.291665 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.824855 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-66cb4657dd-z97bx"] Nov 24 13:40:41 crc kubenswrapper[5039]: W1124 13:40:41.831438 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7cd31c1b_3250_444a_a717_88349d2c57a0.slice/crio-f8a8e36dccfaf5c738ce0f5c91c870143ce187f5cc374c5369170f0dbb63ad54 WatchSource:0}: Error finding container f8a8e36dccfaf5c738ce0f5c91c870143ce187f5cc374c5369170f0dbb63ad54: Status 404 returned error can't find the container with id f8a8e36dccfaf5c738ce0f5c91c870143ce187f5cc374c5369170f0dbb63ad54 Nov 24 13:40:41 crc kubenswrapper[5039]: W1124 13:40:41.833843 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda0a57e07_3e25_4329_9789_c3ff435860c3.slice/crio-5bbe680c088f295531085aec4009918e81d9ccc81fc8a3ebe5e41c2584635c83 WatchSource:0}: Error finding container 5bbe680c088f295531085aec4009918e81d9ccc81fc8a3ebe5e41c2584635c83: Status 404 returned error can't find the container with id 5bbe680c088f295531085aec4009918e81d9ccc81fc8a3ebe5e41c2584635c83 Nov 24 13:40:41 crc kubenswrapper[5039]: I1124 13:40:41.835930 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-576959578d-mb556"] Nov 24 13:40:42 crc kubenswrapper[5039]: I1124 13:40:42.587608 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-576959578d-mb556" event={"ID":"a0a57e07-3e25-4329-9789-c3ff435860c3","Type":"ContainerStarted","Data":"561caeee0fe41fb6097fdcb6514f63ff2dd0560e05839e8bb6f8febe7db5689a"} Nov 24 13:40:42 crc kubenswrapper[5039]: I1124 13:40:42.587927 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-576959578d-mb556" event={"ID":"a0a57e07-3e25-4329-9789-c3ff435860c3","Type":"ContainerStarted","Data":"5bbe680c088f295531085aec4009918e81d9ccc81fc8a3ebe5e41c2584635c83"} Nov 24 13:40:42 crc kubenswrapper[5039]: I1124 13:40:42.588959 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-576959578d-mb556" Nov 24 13:40:42 crc kubenswrapper[5039]: I1124 13:40:42.590434 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-66cb4657dd-z97bx" event={"ID":"7cd31c1b-3250-444a-a717-88349d2c57a0","Type":"ContainerStarted","Data":"fad34ed44b4a2d3297b80e983579dfcb37d2bd1d0843814a53c6822c21c89fc2"} Nov 24 13:40:42 crc kubenswrapper[5039]: I1124 13:40:42.590459 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-66cb4657dd-z97bx" event={"ID":"7cd31c1b-3250-444a-a717-88349d2c57a0","Type":"ContainerStarted","Data":"f8a8e36dccfaf5c738ce0f5c91c870143ce187f5cc374c5369170f0dbb63ad54"} Nov 24 13:40:42 crc kubenswrapper[5039]: I1124 13:40:42.613394 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-576959578d-mb556" podStartSLOduration=2.613354549 podStartE2EDuration="2.613354549s" podCreationTimestamp="2025-11-24 13:40:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:40:42.611401011 +0000 UTC m=+1355.050525511" watchObservedRunningTime="2025-11-24 13:40:42.613354549 +0000 UTC m=+1355.052479049" Nov 24 13:40:43 crc kubenswrapper[5039]: I1124 13:40:43.619926 5039 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"158b7bf7-1207-4509-bb3f-d666847eb59d","Type":"ContainerStarted","Data":"ae8cd0912319598871420e063001336b50c3773c4c4d4bd25ccf57e0419f0a2c"} Nov 24 13:40:43 crc kubenswrapper[5039]: I1124 13:40:43.624052 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-x49zw" event={"ID":"4ab794c0-2264-4041-b697-ef7829a5129a","Type":"ContainerStarted","Data":"f3fe0c60386d097c989c6dbfd9c6b2c46f56109fc7c14e1eea13c0b07ccf2fc8"} Nov 24 13:40:43 crc kubenswrapper[5039]: I1124 13:40:43.628229 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-66cb4657dd-z97bx" event={"ID":"7cd31c1b-3250-444a-a717-88349d2c57a0","Type":"ContainerStarted","Data":"3e15c8c716f641d5986d8a576da99b497019b7eb3d39894e88ad3c9cd6b8c6ed"} Nov 24 13:40:43 crc kubenswrapper[5039]: I1124 13:40:43.628368 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:43 crc kubenswrapper[5039]: I1124 13:40:43.644111 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-x49zw" podStartSLOduration=10.213879603 podStartE2EDuration="1m0.644091961s" podCreationTimestamp="2025-11-24 13:39:43 +0000 UTC" firstStartedPulling="2025-11-24 13:39:52.188443701 +0000 UTC m=+1304.627568201" lastFinishedPulling="2025-11-24 13:40:42.618656059 +0000 UTC m=+1355.057780559" observedRunningTime="2025-11-24 13:40:43.643967358 +0000 UTC m=+1356.083091868" watchObservedRunningTime="2025-11-24 13:40:43.644091961 +0000 UTC m=+1356.083216461" Nov 24 13:40:43 crc kubenswrapper[5039]: I1124 13:40:43.672307 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-66cb4657dd-z97bx" podStartSLOduration=3.67228201 podStartE2EDuration="3.67228201s" podCreationTimestamp="2025-11-24 13:40:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:40:43.664422508 +0000 UTC m=+1356.103547008" watchObservedRunningTime="2025-11-24 13:40:43.67228201 +0000 UTC m=+1356.111406510" Nov 24 13:40:44 crc kubenswrapper[5039]: I1124 13:40:44.642710 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-47dzd" event={"ID":"cb2d453a-99e6-4593-ad2d-a57c7a2c2519","Type":"ContainerStarted","Data":"e33183e69a2bed150c7c16dc7ff7d80a50190a0a951167491f30768b6e5f5a5e"} Nov 24 13:40:44 crc kubenswrapper[5039]: I1124 13:40:44.644026 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:40:44 crc kubenswrapper[5039]: I1124 13:40:44.663968 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-47dzd" podStartSLOduration=10.658410717 podStartE2EDuration="1m1.663946287s" podCreationTimestamp="2025-11-24 13:39:43 +0000 UTC" firstStartedPulling="2025-11-24 13:39:52.1736781 +0000 UTC m=+1304.612802600" lastFinishedPulling="2025-11-24 13:40:43.17921367 +0000 UTC m=+1355.618338170" observedRunningTime="2025-11-24 13:40:44.659726044 +0000 UTC m=+1357.098850544" watchObservedRunningTime="2025-11-24 13:40:44.663946287 +0000 UTC m=+1357.103070797" Nov 24 13:40:45 crc kubenswrapper[5039]: I1124 13:40:45.655581 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-qvrwf" 
event={"ID":"7c0313ce-4944-4fad-bce0-47d60b273f69","Type":"ContainerStarted","Data":"6fac4d240154030f2adbe813a51f23a6104b252d2fe89d53286245577c472cfb"} Nov 24 13:40:45 crc kubenswrapper[5039]: I1124 13:40:45.687554 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-qvrwf" podStartSLOduration=9.740350691 podStartE2EDuration="1m2.687534304s" podCreationTimestamp="2025-11-24 13:39:43 +0000 UTC" firstStartedPulling="2025-11-24 13:39:51.865986404 +0000 UTC m=+1304.305110904" lastFinishedPulling="2025-11-24 13:40:44.813169997 +0000 UTC m=+1357.252294517" observedRunningTime="2025-11-24 13:40:45.678915354 +0000 UTC m=+1358.118039854" watchObservedRunningTime="2025-11-24 13:40:45.687534304 +0000 UTC m=+1358.126658804" Nov 24 13:40:46 crc kubenswrapper[5039]: I1124 13:40:46.673419 5039 generic.go:334] "Generic (PLEG): container finished" podID="4ab794c0-2264-4041-b697-ef7829a5129a" containerID="f3fe0c60386d097c989c6dbfd9c6b2c46f56109fc7c14e1eea13c0b07ccf2fc8" exitCode=0 Nov 24 13:40:46 crc kubenswrapper[5039]: I1124 13:40:46.673473 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-x49zw" event={"ID":"4ab794c0-2264-4041-b697-ef7829a5129a","Type":"ContainerDied","Data":"f3fe0c60386d097c989c6dbfd9c6b2c46f56109fc7c14e1eea13c0b07ccf2fc8"} Nov 24 13:40:48 crc kubenswrapper[5039]: I1124 13:40:48.693752 5039 generic.go:334] "Generic (PLEG): container finished" podID="a3938495-f119-4641-b76b-0333c1391b24" containerID="36249bd63021c1e97ceee74e50e1de631a98586dfedf719d9a4b4afae3b296a9" exitCode=0 Nov 24 13:40:48 crc kubenswrapper[5039]: I1124 13:40:48.693851 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-wgc6v" event={"ID":"a3938495-f119-4641-b76b-0333c1391b24","Type":"ContainerDied","Data":"36249bd63021c1e97ceee74e50e1de631a98586dfedf719d9a4b4afae3b296a9"} Nov 24 13:40:49 crc kubenswrapper[5039]: I1124 13:40:49.481307 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-x49zw" Nov 24 13:40:49 crc kubenswrapper[5039]: I1124 13:40:49.587884 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ab794c0-2264-4041-b697-ef7829a5129a-combined-ca-bundle\") pod \"4ab794c0-2264-4041-b697-ef7829a5129a\" (UID: \"4ab794c0-2264-4041-b697-ef7829a5129a\") " Nov 24 13:40:49 crc kubenswrapper[5039]: I1124 13:40:49.588022 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4ab794c0-2264-4041-b697-ef7829a5129a-db-sync-config-data\") pod \"4ab794c0-2264-4041-b697-ef7829a5129a\" (UID: \"4ab794c0-2264-4041-b697-ef7829a5129a\") " Nov 24 13:40:49 crc kubenswrapper[5039]: I1124 13:40:49.588168 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dhlfq\" (UniqueName: \"kubernetes.io/projected/4ab794c0-2264-4041-b697-ef7829a5129a-kube-api-access-dhlfq\") pod \"4ab794c0-2264-4041-b697-ef7829a5129a\" (UID: \"4ab794c0-2264-4041-b697-ef7829a5129a\") " Nov 24 13:40:49 crc kubenswrapper[5039]: I1124 13:40:49.594939 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ab794c0-2264-4041-b697-ef7829a5129a-kube-api-access-dhlfq" (OuterVolumeSpecName: "kube-api-access-dhlfq") pod "4ab794c0-2264-4041-b697-ef7829a5129a" (UID: "4ab794c0-2264-4041-b697-ef7829a5129a"). 
InnerVolumeSpecName "kube-api-access-dhlfq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:40:49 crc kubenswrapper[5039]: I1124 13:40:49.596393 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ab794c0-2264-4041-b697-ef7829a5129a-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "4ab794c0-2264-4041-b697-ef7829a5129a" (UID: "4ab794c0-2264-4041-b697-ef7829a5129a"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:49 crc kubenswrapper[5039]: I1124 13:40:49.618303 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ab794c0-2264-4041-b697-ef7829a5129a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4ab794c0-2264-4041-b697-ef7829a5129a" (UID: "4ab794c0-2264-4041-b697-ef7829a5129a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:49 crc kubenswrapper[5039]: I1124 13:40:49.690955 5039 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4ab794c0-2264-4041-b697-ef7829a5129a-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:49 crc kubenswrapper[5039]: I1124 13:40:49.690994 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dhlfq\" (UniqueName: \"kubernetes.io/projected/4ab794c0-2264-4041-b697-ef7829a5129a-kube-api-access-dhlfq\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:49 crc kubenswrapper[5039]: I1124 13:40:49.691006 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ab794c0-2264-4041-b697-ef7829a5129a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:49 crc kubenswrapper[5039]: I1124 13:40:49.703605 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-x49zw" event={"ID":"4ab794c0-2264-4041-b697-ef7829a5129a","Type":"ContainerDied","Data":"45bd170283b7e9cd3031e9311c85268801a1f71ad9040cb0f989e7731a96a449"} Nov 24 13:40:49 crc kubenswrapper[5039]: I1124 13:40:49.703658 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-x49zw" Nov 24 13:40:49 crc kubenswrapper[5039]: I1124 13:40:49.703667 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="45bd170283b7e9cd3031e9311c85268801a1f71ad9040cb0f989e7731a96a449" Nov 24 13:40:49 crc kubenswrapper[5039]: E1124 13:40:49.993801 5039 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7c0313ce_4944_4fad_bce0_47d60b273f69.slice/crio-6fac4d240154030f2adbe813a51f23a6104b252d2fe89d53286245577c472cfb.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7c0313ce_4944_4fad_bce0_47d60b273f69.slice/crio-conmon-6fac4d240154030f2adbe813a51f23a6104b252d2fe89d53286245577c472cfb.scope\": RecentStats: unable to find data in memory cache]" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.766641 5039 generic.go:334] "Generic (PLEG): container finished" podID="2c22dae7-e545-4eb0-9552-f3c691f397df" containerID="91f775a9e48154d99b12708d5794be3c4a30d39b15993c9d325891276817ad85" exitCode=0 Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.766927 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-vvjsm" event={"ID":"2c22dae7-e545-4eb0-9552-f3c691f397df","Type":"ContainerDied","Data":"91f775a9e48154d99b12708d5794be3c4a30d39b15993c9d325891276817ad85"} Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.792475 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-wgc6v" event={"ID":"a3938495-f119-4641-b76b-0333c1391b24","Type":"ContainerDied","Data":"416a9903ba00df7ea466b990e781cc84025b0645a48fbe7e8ec2fc807cd3a8e4"} Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.792542 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="416a9903ba00df7ea466b990e781cc84025b0645a48fbe7e8ec2fc807cd3a8e4" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.807131 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-746f5fd69d-mww4x"] Nov 24 13:40:50 crc kubenswrapper[5039]: E1124 13:40:50.807695 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ab794c0-2264-4041-b697-ef7829a5129a" containerName="barbican-db-sync" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.807721 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ab794c0-2264-4041-b697-ef7829a5129a" containerName="barbican-db-sync" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.807984 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ab794c0-2264-4041-b697-ef7829a5129a" containerName="barbican-db-sync" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.812206 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.816302 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.832356 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-grn48" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.832692 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.832781 5039 generic.go:334] "Generic (PLEG): container finished" podID="7c0313ce-4944-4fad-bce0-47d60b273f69" containerID="6fac4d240154030f2adbe813a51f23a6104b252d2fe89d53286245577c472cfb" exitCode=0 Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.832877 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-qvrwf" event={"ID":"7c0313ce-4944-4fad-bce0-47d60b273f69","Type":"ContainerDied","Data":"6fac4d240154030f2adbe813a51f23a6104b252d2fe89d53286245577c472cfb"} Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.836532 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-7f697665cf-n6vcs"] Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.838137 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7f697665cf-n6vcs" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.840279 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.855005 5039 generic.go:334] "Generic (PLEG): container finished" podID="cb2d453a-99e6-4593-ad2d-a57c7a2c2519" containerID="e33183e69a2bed150c7c16dc7ff7d80a50190a0a951167491f30768b6e5f5a5e" exitCode=0 Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.855056 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-47dzd" event={"ID":"cb2d453a-99e6-4593-ad2d-a57c7a2c2519","Type":"ContainerDied","Data":"e33183e69a2bed150c7c16dc7ff7d80a50190a0a951167491f30768b6e5f5a5e"} Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.874079 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-746f5fd69d-mww4x"] Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.888409 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-wgc6v" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.921243 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7f697665cf-n6vcs"] Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.937646 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/df3df8f8-f89f-4eab-98af-d7dd6cfe17da-logs\") pod \"barbican-keystone-listener-746f5fd69d-mww4x\" (UID: \"df3df8f8-f89f-4eab-98af-d7dd6cfe17da\") " pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.937691 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df3df8f8-f89f-4eab-98af-d7dd6cfe17da-config-data\") pod \"barbican-keystone-listener-746f5fd69d-mww4x\" (UID: \"df3df8f8-f89f-4eab-98af-d7dd6cfe17da\") " pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.937715 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/df3df8f8-f89f-4eab-98af-d7dd6cfe17da-config-data-custom\") pod \"barbican-keystone-listener-746f5fd69d-mww4x\" (UID: \"df3df8f8-f89f-4eab-98af-d7dd6cfe17da\") " pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.937752 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7tw7j\" (UniqueName: \"kubernetes.io/projected/1228dfc2-bfeb-4ba9-b0f8-ac276a2207be-kube-api-access-7tw7j\") pod \"barbican-worker-7f697665cf-n6vcs\" (UID: \"1228dfc2-bfeb-4ba9-b0f8-ac276a2207be\") " pod="openstack/barbican-worker-7f697665cf-n6vcs" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.937791 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df3df8f8-f89f-4eab-98af-d7dd6cfe17da-combined-ca-bundle\") pod \"barbican-keystone-listener-746f5fd69d-mww4x\" (UID: \"df3df8f8-f89f-4eab-98af-d7dd6cfe17da\") " pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.937855 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1228dfc2-bfeb-4ba9-b0f8-ac276a2207be-config-data\") pod \"barbican-worker-7f697665cf-n6vcs\" (UID: \"1228dfc2-bfeb-4ba9-b0f8-ac276a2207be\") " pod="openstack/barbican-worker-7f697665cf-n6vcs" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.937898 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1228dfc2-bfeb-4ba9-b0f8-ac276a2207be-config-data-custom\") pod \"barbican-worker-7f697665cf-n6vcs\" (UID: \"1228dfc2-bfeb-4ba9-b0f8-ac276a2207be\") " pod="openstack/barbican-worker-7f697665cf-n6vcs" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.937932 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1228dfc2-bfeb-4ba9-b0f8-ac276a2207be-logs\") pod \"barbican-worker-7f697665cf-n6vcs\" (UID: 
\"1228dfc2-bfeb-4ba9-b0f8-ac276a2207be\") " pod="openstack/barbican-worker-7f697665cf-n6vcs" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.937956 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1228dfc2-bfeb-4ba9-b0f8-ac276a2207be-combined-ca-bundle\") pod \"barbican-worker-7f697665cf-n6vcs\" (UID: \"1228dfc2-bfeb-4ba9-b0f8-ac276a2207be\") " pod="openstack/barbican-worker-7f697665cf-n6vcs" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.937991 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dn67w\" (UniqueName: \"kubernetes.io/projected/df3df8f8-f89f-4eab-98af-d7dd6cfe17da-kube-api-access-dn67w\") pod \"barbican-keystone-listener-746f5fd69d-mww4x\" (UID: \"df3df8f8-f89f-4eab-98af-d7dd6cfe17da\") " pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.985319 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7979dc8455-9nppj"] Nov 24 13:40:50 crc kubenswrapper[5039]: E1124 13:40:50.985894 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3938495-f119-4641-b76b-0333c1391b24" containerName="neutron-db-sync" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.985926 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3938495-f119-4641-b76b-0333c1391b24" containerName="neutron-db-sync" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.986197 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3938495-f119-4641-b76b-0333c1391b24" containerName="neutron-db-sync" Nov 24 13:40:50 crc kubenswrapper[5039]: I1124 13:40:50.987601 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.018034 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7979dc8455-9nppj"] Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.040554 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5d2jk\" (UniqueName: \"kubernetes.io/projected/a3938495-f119-4641-b76b-0333c1391b24-kube-api-access-5d2jk\") pod \"a3938495-f119-4641-b76b-0333c1391b24\" (UID: \"a3938495-f119-4641-b76b-0333c1391b24\") " Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.040741 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3938495-f119-4641-b76b-0333c1391b24-combined-ca-bundle\") pod \"a3938495-f119-4641-b76b-0333c1391b24\" (UID: \"a3938495-f119-4641-b76b-0333c1391b24\") " Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.042668 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a3938495-f119-4641-b76b-0333c1391b24-config\") pod \"a3938495-f119-4641-b76b-0333c1391b24\" (UID: \"a3938495-f119-4641-b76b-0333c1391b24\") " Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.043101 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1228dfc2-bfeb-4ba9-b0f8-ac276a2207be-logs\") pod \"barbican-worker-7f697665cf-n6vcs\" (UID: \"1228dfc2-bfeb-4ba9-b0f8-ac276a2207be\") " pod="openstack/barbican-worker-7f697665cf-n6vcs" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.043219 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1228dfc2-bfeb-4ba9-b0f8-ac276a2207be-combined-ca-bundle\") pod \"barbican-worker-7f697665cf-n6vcs\" (UID: \"1228dfc2-bfeb-4ba9-b0f8-ac276a2207be\") " pod="openstack/barbican-worker-7f697665cf-n6vcs" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.043342 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dn67w\" (UniqueName: \"kubernetes.io/projected/df3df8f8-f89f-4eab-98af-d7dd6cfe17da-kube-api-access-dn67w\") pod \"barbican-keystone-listener-746f5fd69d-mww4x\" (UID: \"df3df8f8-f89f-4eab-98af-d7dd6cfe17da\") " pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.043632 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/df3df8f8-f89f-4eab-98af-d7dd6cfe17da-logs\") pod \"barbican-keystone-listener-746f5fd69d-mww4x\" (UID: \"df3df8f8-f89f-4eab-98af-d7dd6cfe17da\") " pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.043760 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df3df8f8-f89f-4eab-98af-d7dd6cfe17da-config-data\") pod \"barbican-keystone-listener-746f5fd69d-mww4x\" (UID: \"df3df8f8-f89f-4eab-98af-d7dd6cfe17da\") " pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.043868 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/df3df8f8-f89f-4eab-98af-d7dd6cfe17da-config-data-custom\") pod \"barbican-keystone-listener-746f5fd69d-mww4x\" (UID: \"df3df8f8-f89f-4eab-98af-d7dd6cfe17da\") " pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.044007 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7tw7j\" (UniqueName: \"kubernetes.io/projected/1228dfc2-bfeb-4ba9-b0f8-ac276a2207be-kube-api-access-7tw7j\") pod \"barbican-worker-7f697665cf-n6vcs\" (UID: \"1228dfc2-bfeb-4ba9-b0f8-ac276a2207be\") " pod="openstack/barbican-worker-7f697665cf-n6vcs" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.044215 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df3df8f8-f89f-4eab-98af-d7dd6cfe17da-combined-ca-bundle\") pod \"barbican-keystone-listener-746f5fd69d-mww4x\" (UID: \"df3df8f8-f89f-4eab-98af-d7dd6cfe17da\") " pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.044693 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1228dfc2-bfeb-4ba9-b0f8-ac276a2207be-config-data\") pod \"barbican-worker-7f697665cf-n6vcs\" (UID: \"1228dfc2-bfeb-4ba9-b0f8-ac276a2207be\") " pod="openstack/barbican-worker-7f697665cf-n6vcs" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.044839 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1228dfc2-bfeb-4ba9-b0f8-ac276a2207be-config-data-custom\") pod \"barbican-worker-7f697665cf-n6vcs\" (UID: \"1228dfc2-bfeb-4ba9-b0f8-ac276a2207be\") " pod="openstack/barbican-worker-7f697665cf-n6vcs" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.047607 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/df3df8f8-f89f-4eab-98af-d7dd6cfe17da-logs\") pod \"barbican-keystone-listener-746f5fd69d-mww4x\" (UID: \"df3df8f8-f89f-4eab-98af-d7dd6cfe17da\") " pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.047923 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1228dfc2-bfeb-4ba9-b0f8-ac276a2207be-logs\") pod \"barbican-worker-7f697665cf-n6vcs\" (UID: \"1228dfc2-bfeb-4ba9-b0f8-ac276a2207be\") " pod="openstack/barbican-worker-7f697665cf-n6vcs" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.050756 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3938495-f119-4641-b76b-0333c1391b24-kube-api-access-5d2jk" (OuterVolumeSpecName: "kube-api-access-5d2jk") pod "a3938495-f119-4641-b76b-0333c1391b24" (UID: "a3938495-f119-4641-b76b-0333c1391b24"). InnerVolumeSpecName "kube-api-access-5d2jk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.053414 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1228dfc2-bfeb-4ba9-b0f8-ac276a2207be-combined-ca-bundle\") pod \"barbican-worker-7f697665cf-n6vcs\" (UID: \"1228dfc2-bfeb-4ba9-b0f8-ac276a2207be\") " pod="openstack/barbican-worker-7f697665cf-n6vcs" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.054000 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1228dfc2-bfeb-4ba9-b0f8-ac276a2207be-config-data-custom\") pod \"barbican-worker-7f697665cf-n6vcs\" (UID: \"1228dfc2-bfeb-4ba9-b0f8-ac276a2207be\") " pod="openstack/barbican-worker-7f697665cf-n6vcs" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.054415 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df3df8f8-f89f-4eab-98af-d7dd6cfe17da-combined-ca-bundle\") pod \"barbican-keystone-listener-746f5fd69d-mww4x\" (UID: \"df3df8f8-f89f-4eab-98af-d7dd6cfe17da\") " pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.066987 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/df3df8f8-f89f-4eab-98af-d7dd6cfe17da-config-data-custom\") pod \"barbican-keystone-listener-746f5fd69d-mww4x\" (UID: \"df3df8f8-f89f-4eab-98af-d7dd6cfe17da\") " pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.067157 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df3df8f8-f89f-4eab-98af-d7dd6cfe17da-config-data\") pod \"barbican-keystone-listener-746f5fd69d-mww4x\" (UID: \"df3df8f8-f89f-4eab-98af-d7dd6cfe17da\") " pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.078428 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7tw7j\" (UniqueName: \"kubernetes.io/projected/1228dfc2-bfeb-4ba9-b0f8-ac276a2207be-kube-api-access-7tw7j\") pod \"barbican-worker-7f697665cf-n6vcs\" (UID: \"1228dfc2-bfeb-4ba9-b0f8-ac276a2207be\") " pod="openstack/barbican-worker-7f697665cf-n6vcs" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.080135 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1228dfc2-bfeb-4ba9-b0f8-ac276a2207be-config-data\") pod \"barbican-worker-7f697665cf-n6vcs\" (UID: \"1228dfc2-bfeb-4ba9-b0f8-ac276a2207be\") " pod="openstack/barbican-worker-7f697665cf-n6vcs" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.081646 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dn67w\" (UniqueName: \"kubernetes.io/projected/df3df8f8-f89f-4eab-98af-d7dd6cfe17da-kube-api-access-dn67w\") pod \"barbican-keystone-listener-746f5fd69d-mww4x\" (UID: \"df3df8f8-f89f-4eab-98af-d7dd6cfe17da\") " pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.085613 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-cd9b99d5d-xfvxc"] Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.087227 5039 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.089619 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.094481 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-cd9b99d5d-xfvxc"] Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.097606 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3938495-f119-4641-b76b-0333c1391b24-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a3938495-f119-4641-b76b-0333c1391b24" (UID: "a3938495-f119-4641-b76b-0333c1391b24"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.132900 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3938495-f119-4641-b76b-0333c1391b24-config" (OuterVolumeSpecName: "config") pod "a3938495-f119-4641-b76b-0333c1391b24" (UID: "a3938495-f119-4641-b76b-0333c1391b24"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.147874 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-dns-svc\") pod \"dnsmasq-dns-7979dc8455-9nppj\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.147927 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-ovsdbserver-nb\") pod \"dnsmasq-dns-7979dc8455-9nppj\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.147960 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-config\") pod \"dnsmasq-dns-7979dc8455-9nppj\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.148020 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-dns-swift-storage-0\") pod \"dnsmasq-dns-7979dc8455-9nppj\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.148067 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25hr5\" (UniqueName: \"kubernetes.io/projected/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-kube-api-access-25hr5\") pod \"dnsmasq-dns-7979dc8455-9nppj\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.148101 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-ovsdbserver-sb\") pod \"dnsmasq-dns-7979dc8455-9nppj\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.148278 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5d2jk\" (UniqueName: \"kubernetes.io/projected/a3938495-f119-4641-b76b-0333c1391b24-kube-api-access-5d2jk\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.148290 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3938495-f119-4641-b76b-0333c1391b24-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.148299 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/a3938495-f119-4641-b76b-0333c1391b24-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.212492 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.250051 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b291da35-220c-4325-bef8-891d00c483cb-logs\") pod \"barbican-api-cd9b99d5d-xfvxc\" (UID: \"b291da35-220c-4325-bef8-891d00c483cb\") " pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.250100 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b291da35-220c-4325-bef8-891d00c483cb-config-data\") pod \"barbican-api-cd9b99d5d-xfvxc\" (UID: \"b291da35-220c-4325-bef8-891d00c483cb\") " pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.250138 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25hr5\" (UniqueName: \"kubernetes.io/projected/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-kube-api-access-25hr5\") pod \"dnsmasq-dns-7979dc8455-9nppj\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.250169 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-ovsdbserver-sb\") pod \"dnsmasq-dns-7979dc8455-9nppj\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.250216 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b291da35-220c-4325-bef8-891d00c483cb-config-data-custom\") pod \"barbican-api-cd9b99d5d-xfvxc\" (UID: \"b291da35-220c-4325-bef8-891d00c483cb\") " pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.250492 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-dns-svc\") pod \"dnsmasq-dns-7979dc8455-9nppj\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " 
pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.250595 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-ovsdbserver-nb\") pod \"dnsmasq-dns-7979dc8455-9nppj\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.250664 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b291da35-220c-4325-bef8-891d00c483cb-combined-ca-bundle\") pod \"barbican-api-cd9b99d5d-xfvxc\" (UID: \"b291da35-220c-4325-bef8-891d00c483cb\") " pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.250962 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-config\") pod \"dnsmasq-dns-7979dc8455-9nppj\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.251082 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjn4x\" (UniqueName: \"kubernetes.io/projected/b291da35-220c-4325-bef8-891d00c483cb-kube-api-access-tjn4x\") pod \"barbican-api-cd9b99d5d-xfvxc\" (UID: \"b291da35-220c-4325-bef8-891d00c483cb\") " pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.251244 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-dns-swift-storage-0\") pod \"dnsmasq-dns-7979dc8455-9nppj\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.251600 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-ovsdbserver-sb\") pod \"dnsmasq-dns-7979dc8455-9nppj\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.253294 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-config\") pod \"dnsmasq-dns-7979dc8455-9nppj\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.254966 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-dns-swift-storage-0\") pod \"dnsmasq-dns-7979dc8455-9nppj\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.255189 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-dns-svc\") pod \"dnsmasq-dns-7979dc8455-9nppj\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 
13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.255201 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-ovsdbserver-nb\") pod \"dnsmasq-dns-7979dc8455-9nppj\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.269076 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25hr5\" (UniqueName: \"kubernetes.io/projected/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-kube-api-access-25hr5\") pod \"dnsmasq-dns-7979dc8455-9nppj\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.303274 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7f697665cf-n6vcs" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.353302 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b291da35-220c-4325-bef8-891d00c483cb-combined-ca-bundle\") pod \"barbican-api-cd9b99d5d-xfvxc\" (UID: \"b291da35-220c-4325-bef8-891d00c483cb\") " pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.353349 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjn4x\" (UniqueName: \"kubernetes.io/projected/b291da35-220c-4325-bef8-891d00c483cb-kube-api-access-tjn4x\") pod \"barbican-api-cd9b99d5d-xfvxc\" (UID: \"b291da35-220c-4325-bef8-891d00c483cb\") " pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.353395 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b291da35-220c-4325-bef8-891d00c483cb-logs\") pod \"barbican-api-cd9b99d5d-xfvxc\" (UID: \"b291da35-220c-4325-bef8-891d00c483cb\") " pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.353419 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b291da35-220c-4325-bef8-891d00c483cb-config-data\") pod \"barbican-api-cd9b99d5d-xfvxc\" (UID: \"b291da35-220c-4325-bef8-891d00c483cb\") " pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.353492 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b291da35-220c-4325-bef8-891d00c483cb-config-data-custom\") pod \"barbican-api-cd9b99d5d-xfvxc\" (UID: \"b291da35-220c-4325-bef8-891d00c483cb\") " pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.355341 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b291da35-220c-4325-bef8-891d00c483cb-logs\") pod \"barbican-api-cd9b99d5d-xfvxc\" (UID: \"b291da35-220c-4325-bef8-891d00c483cb\") " pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.357447 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.359163 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b291da35-220c-4325-bef8-891d00c483cb-config-data-custom\") pod \"barbican-api-cd9b99d5d-xfvxc\" (UID: \"b291da35-220c-4325-bef8-891d00c483cb\") " pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.363287 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b291da35-220c-4325-bef8-891d00c483cb-combined-ca-bundle\") pod \"barbican-api-cd9b99d5d-xfvxc\" (UID: \"b291da35-220c-4325-bef8-891d00c483cb\") " pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.363623 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b291da35-220c-4325-bef8-891d00c483cb-config-data\") pod \"barbican-api-cd9b99d5d-xfvxc\" (UID: \"b291da35-220c-4325-bef8-891d00c483cb\") " pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.377915 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjn4x\" (UniqueName: \"kubernetes.io/projected/b291da35-220c-4325-bef8-891d00c483cb-kube-api-access-tjn4x\") pod \"barbican-api-cd9b99d5d-xfvxc\" (UID: \"b291da35-220c-4325-bef8-891d00c483cb\") " pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.414036 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.734390 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-746f5fd69d-mww4x"] Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.847291 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7f697665cf-n6vcs"] Nov 24 13:40:51 crc kubenswrapper[5039]: W1124 13:40:51.852147 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1228dfc2_bfeb_4ba9_b0f8_ac276a2207be.slice/crio-7d1bb09d3c0290fa324d8a6fbe3840fe5d3be2097268304d8bec3801477e955e WatchSource:0}: Error finding container 7d1bb09d3c0290fa324d8a6fbe3840fe5d3be2097268304d8bec3801477e955e: Status 404 returned error can't find the container with id 7d1bb09d3c0290fa324d8a6fbe3840fe5d3be2097268304d8bec3801477e955e Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.870738 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"158b7bf7-1207-4509-bb3f-d666847eb59d","Type":"ContainerStarted","Data":"15134f683af8e079b3b63a39bb9b45b623759db0f04ab5e5a95d5b08bdc8d70c"} Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.870912 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.870885 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="158b7bf7-1207-4509-bb3f-d666847eb59d" containerName="ceilometer-central-agent" containerID="cri-o://50b846d0dd3fa1f7b3ebc5917ca54f611564534f5d4050a457263515aeadf570" gracePeriod=30 Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 
13:40:51.870933 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="158b7bf7-1207-4509-bb3f-d666847eb59d" containerName="proxy-httpd" containerID="cri-o://15134f683af8e079b3b63a39bb9b45b623759db0f04ab5e5a95d5b08bdc8d70c" gracePeriod=30 Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.870971 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="158b7bf7-1207-4509-bb3f-d666847eb59d" containerName="sg-core" containerID="cri-o://ae8cd0912319598871420e063001336b50c3773c4c4d4bd25ccf57e0419f0a2c" gracePeriod=30 Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.871057 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="158b7bf7-1207-4509-bb3f-d666847eb59d" containerName="ceilometer-notification-agent" containerID="cri-o://8fc8f7a586d558f2f11d83f27837cff6495aeae1ae5fbea7c4dca0f303501b07" gracePeriod=30 Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.874797 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7f697665cf-n6vcs" event={"ID":"1228dfc2-bfeb-4ba9-b0f8-ac276a2207be","Type":"ContainerStarted","Data":"7d1bb09d3c0290fa324d8a6fbe3840fe5d3be2097268304d8bec3801477e955e"} Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.891715 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" event={"ID":"df3df8f8-f89f-4eab-98af-d7dd6cfe17da","Type":"ContainerStarted","Data":"74958ac71067abb818c28c2be28c27039157aad21a2ed9db0f38e75f4419c41c"} Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.891983 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-wgc6v" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.903449 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=10.535279183 podStartE2EDuration="1m8.903369366s" podCreationTimestamp="2025-11-24 13:39:43 +0000 UTC" firstStartedPulling="2025-11-24 13:39:52.361260808 +0000 UTC m=+1304.800385318" lastFinishedPulling="2025-11-24 13:40:50.729351011 +0000 UTC m=+1363.168475501" observedRunningTime="2025-11-24 13:40:51.892462897 +0000 UTC m=+1364.331587417" watchObservedRunningTime="2025-11-24 13:40:51.903369366 +0000 UTC m=+1364.342493866" Nov 24 13:40:51 crc kubenswrapper[5039]: I1124 13:40:51.957229 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7979dc8455-9nppj"] Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.135343 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-cd9b99d5d-xfvxc"] Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.165710 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7979dc8455-9nppj"] Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.235564 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-789c5c5cb7-k9zn2"] Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.237592 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.243134 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-c4b94795b-c6c2f"] Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.246191 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-c4b94795b-c6c2f" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.254684 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-789c5c5cb7-k9zn2"] Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.260436 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.260708 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.260959 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.340677 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-h6hqs" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.395258 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5lwdt\" (UniqueName: \"kubernetes.io/projected/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-kube-api-access-5lwdt\") pod \"neutron-c4b94795b-c6c2f\" (UID: \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\") " pod="openstack/neutron-c4b94795b-c6c2f" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.395310 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-config\") pod \"neutron-c4b94795b-c6c2f\" (UID: \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\") " pod="openstack/neutron-c4b94795b-c6c2f" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.395369 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-dns-svc\") pod \"dnsmasq-dns-789c5c5cb7-k9zn2\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.395385 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-ovsdbserver-sb\") pod \"dnsmasq-dns-789c5c5cb7-k9zn2\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.395481 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-httpd-config\") pod \"neutron-c4b94795b-c6c2f\" (UID: \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\") " pod="openstack/neutron-c4b94795b-c6c2f" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.395557 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-dns-swift-storage-0\") pod \"dnsmasq-dns-789c5c5cb7-k9zn2\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.395595 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-ovndb-tls-certs\") pod \"neutron-c4b94795b-c6c2f\" (UID: \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\") " pod="openstack/neutron-c4b94795b-c6c2f" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.395659 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqlvn\" (UniqueName: \"kubernetes.io/projected/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-kube-api-access-cqlvn\") pod \"dnsmasq-dns-789c5c5cb7-k9zn2\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.395685 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-config\") pod \"dnsmasq-dns-789c5c5cb7-k9zn2\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.395761 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-combined-ca-bundle\") pod \"neutron-c4b94795b-c6c2f\" (UID: \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\") " pod="openstack/neutron-c4b94795b-c6c2f" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.395845 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-ovsdbserver-nb\") pod \"dnsmasq-dns-789c5c5cb7-k9zn2\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.453663 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-c4b94795b-c6c2f"] Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.498812 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5lwdt\" (UniqueName: \"kubernetes.io/projected/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-kube-api-access-5lwdt\") pod \"neutron-c4b94795b-c6c2f\" (UID: \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\") " pod="openstack/neutron-c4b94795b-c6c2f" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.498876 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-config\") pod \"neutron-c4b94795b-c6c2f\" (UID: \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\") " pod="openstack/neutron-c4b94795b-c6c2f" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.498963 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-ovsdbserver-sb\") pod \"dnsmasq-dns-789c5c5cb7-k9zn2\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.498986 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-dns-svc\") pod \"dnsmasq-dns-789c5c5cb7-k9zn2\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 
13:40:52.499018 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-httpd-config\") pod \"neutron-c4b94795b-c6c2f\" (UID: \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\") " pod="openstack/neutron-c4b94795b-c6c2f" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.499048 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-dns-swift-storage-0\") pod \"dnsmasq-dns-789c5c5cb7-k9zn2\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.499090 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-ovndb-tls-certs\") pod \"neutron-c4b94795b-c6c2f\" (UID: \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\") " pod="openstack/neutron-c4b94795b-c6c2f" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.499134 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqlvn\" (UniqueName: \"kubernetes.io/projected/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-kube-api-access-cqlvn\") pod \"dnsmasq-dns-789c5c5cb7-k9zn2\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.499158 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-config\") pod \"dnsmasq-dns-789c5c5cb7-k9zn2\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.499253 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-combined-ca-bundle\") pod \"neutron-c4b94795b-c6c2f\" (UID: \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\") " pod="openstack/neutron-c4b94795b-c6c2f" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.499333 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-ovsdbserver-nb\") pod \"dnsmasq-dns-789c5c5cb7-k9zn2\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.507341 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-ovndb-tls-certs\") pod \"neutron-c4b94795b-c6c2f\" (UID: \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\") " pod="openstack/neutron-c4b94795b-c6c2f" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.520308 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-config\") pod \"neutron-c4b94795b-c6c2f\" (UID: \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\") " pod="openstack/neutron-c4b94795b-c6c2f" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.522229 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqlvn\" (UniqueName: 
\"kubernetes.io/projected/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-kube-api-access-cqlvn\") pod \"dnsmasq-dns-789c5c5cb7-k9zn2\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.523017 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-dns-svc\") pod \"dnsmasq-dns-789c5c5cb7-k9zn2\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.526729 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-combined-ca-bundle\") pod \"neutron-c4b94795b-c6c2f\" (UID: \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\") " pod="openstack/neutron-c4b94795b-c6c2f" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.528004 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-config\") pod \"dnsmasq-dns-789c5c5cb7-k9zn2\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.532836 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-httpd-config\") pod \"neutron-c4b94795b-c6c2f\" (UID: \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\") " pod="openstack/neutron-c4b94795b-c6c2f" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.534090 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-ovsdbserver-sb\") pod \"dnsmasq-dns-789c5c5cb7-k9zn2\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.534309 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-ovsdbserver-nb\") pod \"dnsmasq-dns-789c5c5cb7-k9zn2\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.536246 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-dns-swift-storage-0\") pod \"dnsmasq-dns-789c5c5cb7-k9zn2\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.561338 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5lwdt\" (UniqueName: \"kubernetes.io/projected/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-kube-api-access-5lwdt\") pod \"neutron-c4b94795b-c6c2f\" (UID: \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\") " pod="openstack/neutron-c4b94795b-c6c2f" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.614416 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.670163 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-c4b94795b-c6c2f" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.927127 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-vvjsm" event={"ID":"2c22dae7-e545-4eb0-9552-f3c691f397df","Type":"ContainerDied","Data":"1311f3284b52fb7222b5ee544dc846e7a011ea146fc47fc317796be6e6191208"} Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.927442 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1311f3284b52fb7222b5ee544dc846e7a011ea146fc47fc317796be6e6191208" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.934175 5039 generic.go:334] "Generic (PLEG): container finished" podID="158b7bf7-1207-4509-bb3f-d666847eb59d" containerID="15134f683af8e079b3b63a39bb9b45b623759db0f04ab5e5a95d5b08bdc8d70c" exitCode=0 Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.934202 5039 generic.go:334] "Generic (PLEG): container finished" podID="158b7bf7-1207-4509-bb3f-d666847eb59d" containerID="ae8cd0912319598871420e063001336b50c3773c4c4d4bd25ccf57e0419f0a2c" exitCode=2 Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.934240 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"158b7bf7-1207-4509-bb3f-d666847eb59d","Type":"ContainerDied","Data":"15134f683af8e079b3b63a39bb9b45b623759db0f04ab5e5a95d5b08bdc8d70c"} Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.934264 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"158b7bf7-1207-4509-bb3f-d666847eb59d","Type":"ContainerDied","Data":"ae8cd0912319598871420e063001336b50c3773c4c4d4bd25ccf57e0419f0a2c"} Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.942241 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-cd9b99d5d-xfvxc" event={"ID":"b291da35-220c-4325-bef8-891d00c483cb","Type":"ContainerStarted","Data":"fa7ab55ab582921b4ab93b986000346106e4b854c36bf820f9845d4255a0ac1a"} Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.942300 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-cd9b99d5d-xfvxc" event={"ID":"b291da35-220c-4325-bef8-891d00c483cb","Type":"ContainerStarted","Data":"aecd07853f12f36c3cb95266ebe29032fd9760f8bf296744259357692996c74e"} Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.945547 5039 generic.go:334] "Generic (PLEG): container finished" podID="66af038d-f84a-45bf-ab68-5d1abb9ff0e2" containerID="d398212dd1136ded2923aa4162d8316187914b28dc257a8568a3db31e21db0c7" exitCode=0 Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.945629 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7979dc8455-9nppj" event={"ID":"66af038d-f84a-45bf-ab68-5d1abb9ff0e2","Type":"ContainerDied","Data":"d398212dd1136ded2923aa4162d8316187914b28dc257a8568a3db31e21db0c7"} Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.945655 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7979dc8455-9nppj" event={"ID":"66af038d-f84a-45bf-ab68-5d1abb9ff0e2","Type":"ContainerStarted","Data":"6448e2aae8333c58fd713410acceef734f99f6107decac67516f69c4a38fbd4a"} Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.954439 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-qvrwf" event={"ID":"7c0313ce-4944-4fad-bce0-47d60b273f69","Type":"ContainerDied","Data":"bd9ae7ac87801b66dd098f612d39ad48db979ad18bdcc8c9292952cb351eaccb"} Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 
13:40:52.954486 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd9ae7ac87801b66dd098f612d39ad48db979ad18bdcc8c9292952cb351eaccb" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.957540 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-47dzd" event={"ID":"cb2d453a-99e6-4593-ad2d-a57c7a2c2519","Type":"ContainerDied","Data":"76be77551badf9f5cad8f5dac08e7c460a90f17aed782b7a85250574c2317d04"} Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.957572 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76be77551badf9f5cad8f5dac08e7c460a90f17aed782b7a85250574c2317d04" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.978402 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-qvrwf" Nov 24 13:40:52 crc kubenswrapper[5039]: I1124 13:40:52.995778 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-47dzd" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.029340 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-vvjsm" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.124741 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-etc-machine-id\") pod \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.124823 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-config-data\") pod \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.124870 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "cb2d453a-99e6-4593-ad2d-a57c7a2c2519" (UID: "cb2d453a-99e6-4593-ad2d-a57c7a2c2519"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.125018 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gxkfs\" (UniqueName: \"kubernetes.io/projected/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-kube-api-access-gxkfs\") pod \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.125048 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-scripts\") pod \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.125152 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-combined-ca-bundle\") pod \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.125279 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c0313ce-4944-4fad-bce0-47d60b273f69-config-data\") pod \"7c0313ce-4944-4fad-bce0-47d60b273f69\" (UID: \"7c0313ce-4944-4fad-bce0-47d60b273f69\") " Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.125319 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c0313ce-4944-4fad-bce0-47d60b273f69-combined-ca-bundle\") pod \"7c0313ce-4944-4fad-bce0-47d60b273f69\" (UID: \"7c0313ce-4944-4fad-bce0-47d60b273f69\") " Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.125351 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-db-sync-config-data\") pod \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\" (UID: \"cb2d453a-99e6-4593-ad2d-a57c7a2c2519\") " Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.125377 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w5smd\" (UniqueName: \"kubernetes.io/projected/7c0313ce-4944-4fad-bce0-47d60b273f69-kube-api-access-w5smd\") pod \"7c0313ce-4944-4fad-bce0-47d60b273f69\" (UID: \"7c0313ce-4944-4fad-bce0-47d60b273f69\") " Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.126115 5039 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.131761 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-kube-api-access-gxkfs" (OuterVolumeSpecName: "kube-api-access-gxkfs") pod "cb2d453a-99e6-4593-ad2d-a57c7a2c2519" (UID: "cb2d453a-99e6-4593-ad2d-a57c7a2c2519"). InnerVolumeSpecName "kube-api-access-gxkfs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.132705 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-scripts" (OuterVolumeSpecName: "scripts") pod "cb2d453a-99e6-4593-ad2d-a57c7a2c2519" (UID: "cb2d453a-99e6-4593-ad2d-a57c7a2c2519"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.138177 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c0313ce-4944-4fad-bce0-47d60b273f69-kube-api-access-w5smd" (OuterVolumeSpecName: "kube-api-access-w5smd") pod "7c0313ce-4944-4fad-bce0-47d60b273f69" (UID: "7c0313ce-4944-4fad-bce0-47d60b273f69"). InnerVolumeSpecName "kube-api-access-w5smd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.151871 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "cb2d453a-99e6-4593-ad2d-a57c7a2c2519" (UID: "cb2d453a-99e6-4593-ad2d-a57c7a2c2519"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.166189 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cb2d453a-99e6-4593-ad2d-a57c7a2c2519" (UID: "cb2d453a-99e6-4593-ad2d-a57c7a2c2519"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.199166 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c0313ce-4944-4fad-bce0-47d60b273f69-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7c0313ce-4944-4fad-bce0-47d60b273f69" (UID: "7c0313ce-4944-4fad-bce0-47d60b273f69"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.201077 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-config-data" (OuterVolumeSpecName: "config-data") pod "cb2d453a-99e6-4593-ad2d-a57c7a2c2519" (UID: "cb2d453a-99e6-4593-ad2d-a57c7a2c2519"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.231241 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c22dae7-e545-4eb0-9552-f3c691f397df-config-data\") pod \"2c22dae7-e545-4eb0-9552-f3c691f397df\" (UID: \"2c22dae7-e545-4eb0-9552-f3c691f397df\") " Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.231475 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f27nh\" (UniqueName: \"kubernetes.io/projected/2c22dae7-e545-4eb0-9552-f3c691f397df-kube-api-access-f27nh\") pod \"2c22dae7-e545-4eb0-9552-f3c691f397df\" (UID: \"2c22dae7-e545-4eb0-9552-f3c691f397df\") " Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.231517 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2c22dae7-e545-4eb0-9552-f3c691f397df-db-sync-config-data\") pod \"2c22dae7-e545-4eb0-9552-f3c691f397df\" (UID: \"2c22dae7-e545-4eb0-9552-f3c691f397df\") " Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.231568 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c22dae7-e545-4eb0-9552-f3c691f397df-combined-ca-bundle\") pod \"2c22dae7-e545-4eb0-9552-f3c691f397df\" (UID: \"2c22dae7-e545-4eb0-9552-f3c691f397df\") " Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.233147 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c0313ce-4944-4fad-bce0-47d60b273f69-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.233183 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w5smd\" (UniqueName: \"kubernetes.io/projected/7c0313ce-4944-4fad-bce0-47d60b273f69-kube-api-access-w5smd\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.233195 5039 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.233204 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.233213 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gxkfs\" (UniqueName: \"kubernetes.io/projected/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-kube-api-access-gxkfs\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.233222 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.233231 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb2d453a-99e6-4593-ad2d-a57c7a2c2519-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.236193 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/2c22dae7-e545-4eb0-9552-f3c691f397df-kube-api-access-f27nh" (OuterVolumeSpecName: "kube-api-access-f27nh") pod "2c22dae7-e545-4eb0-9552-f3c691f397df" (UID: "2c22dae7-e545-4eb0-9552-f3c691f397df"). InnerVolumeSpecName "kube-api-access-f27nh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.237546 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c22dae7-e545-4eb0-9552-f3c691f397df-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "2c22dae7-e545-4eb0-9552-f3c691f397df" (UID: "2c22dae7-e545-4eb0-9552-f3c691f397df"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.252123 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c0313ce-4944-4fad-bce0-47d60b273f69-config-data" (OuterVolumeSpecName: "config-data") pod "7c0313ce-4944-4fad-bce0-47d60b273f69" (UID: "7c0313ce-4944-4fad-bce0-47d60b273f69"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.322968 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c22dae7-e545-4eb0-9552-f3c691f397df-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2c22dae7-e545-4eb0-9552-f3c691f397df" (UID: "2c22dae7-e545-4eb0-9552-f3c691f397df"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.330701 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-789c5c5cb7-k9zn2"] Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.336294 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f27nh\" (UniqueName: \"kubernetes.io/projected/2c22dae7-e545-4eb0-9552-f3c691f397df-kube-api-access-f27nh\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.336318 5039 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2c22dae7-e545-4eb0-9552-f3c691f397df-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.336327 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c22dae7-e545-4eb0-9552-f3c691f397df-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.336339 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c0313ce-4944-4fad-bce0-47d60b273f69-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.384888 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c22dae7-e545-4eb0-9552-f3c691f397df-config-data" (OuterVolumeSpecName: "config-data") pod "2c22dae7-e545-4eb0-9552-f3c691f397df" (UID: "2c22dae7-e545-4eb0-9552-f3c691f397df"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.438074 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c22dae7-e545-4eb0-9552-f3c691f397df-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.463604 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.469676 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-c4b94795b-c6c2f"] Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.642309 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-dns-swift-storage-0\") pod \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.642786 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-25hr5\" (UniqueName: \"kubernetes.io/projected/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-kube-api-access-25hr5\") pod \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.642981 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-ovsdbserver-sb\") pod \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.643059 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-config\") pod \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.643119 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-ovsdbserver-nb\") pod \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.643318 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-dns-svc\") pod \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\" (UID: \"66af038d-f84a-45bf-ab68-5d1abb9ff0e2\") " Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.650981 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-kube-api-access-25hr5" (OuterVolumeSpecName: "kube-api-access-25hr5") pod "66af038d-f84a-45bf-ab68-5d1abb9ff0e2" (UID: "66af038d-f84a-45bf-ab68-5d1abb9ff0e2"). InnerVolumeSpecName "kube-api-access-25hr5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.681474 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "66af038d-f84a-45bf-ab68-5d1abb9ff0e2" (UID: "66af038d-f84a-45bf-ab68-5d1abb9ff0e2"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.683279 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "66af038d-f84a-45bf-ab68-5d1abb9ff0e2" (UID: "66af038d-f84a-45bf-ab68-5d1abb9ff0e2"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.683712 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "66af038d-f84a-45bf-ab68-5d1abb9ff0e2" (UID: "66af038d-f84a-45bf-ab68-5d1abb9ff0e2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.694885 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-config" (OuterVolumeSpecName: "config") pod "66af038d-f84a-45bf-ab68-5d1abb9ff0e2" (UID: "66af038d-f84a-45bf-ab68-5d1abb9ff0e2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.697087 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "66af038d-f84a-45bf-ab68-5d1abb9ff0e2" (UID: "66af038d-f84a-45bf-ab68-5d1abb9ff0e2"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.747025 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-25hr5\" (UniqueName: \"kubernetes.io/projected/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-kube-api-access-25hr5\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.747264 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.747353 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.747421 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.747537 5039 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.747616 5039 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/66af038d-f84a-45bf-ab68-5d1abb9ff0e2-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.978489 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-cd9b99d5d-xfvxc" event={"ID":"b291da35-220c-4325-bef8-891d00c483cb","Type":"ContainerStarted","Data":"eccb1942bd9f6301d01ef443a846abf033d43febca3c9bb34f22a1126bdab967"} Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.979065 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.979102 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.981157 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7979dc8455-9nppj" event={"ID":"66af038d-f84a-45bf-ab68-5d1abb9ff0e2","Type":"ContainerDied","Data":"6448e2aae8333c58fd713410acceef734f99f6107decac67516f69c4a38fbd4a"} Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.981218 5039 scope.go:117] "RemoveContainer" containerID="d398212dd1136ded2923aa4162d8316187914b28dc257a8568a3db31e21db0c7" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.981270 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7979dc8455-9nppj" Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.991267 5039 generic.go:334] "Generic (PLEG): container finished" podID="626a5ed2-5dc0-47f1-af4c-f4f21adfda35" containerID="aedbf933edbc8d7614313e85e958b5f9897454a02d0abd206ff9be55ce257fed" exitCode=0 Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.991375 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" event={"ID":"626a5ed2-5dc0-47f1-af4c-f4f21adfda35","Type":"ContainerDied","Data":"aedbf933edbc8d7614313e85e958b5f9897454a02d0abd206ff9be55ce257fed"} Nov 24 13:40:53 crc kubenswrapper[5039]: I1124 13:40:53.991426 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" event={"ID":"626a5ed2-5dc0-47f1-af4c-f4f21adfda35","Type":"ContainerStarted","Data":"e6bc809900efb05a8176e8208687b61d45add3897a640f7cacbd86be37bdfcdb"} Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:53.997384 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c4b94795b-c6c2f" event={"ID":"7fe0bf4f-b6f8-48c0-b772-587a715e6c27","Type":"ContainerStarted","Data":"4c909e5cf8f62a06eda948c3adf848a4d64e5c832b8c030a09771e6b065e41dc"} Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.010317 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-cd9b99d5d-xfvxc" podStartSLOduration=3.01029909 podStartE2EDuration="3.01029909s" podCreationTimestamp="2025-11-24 13:40:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:40:53.999634217 +0000 UTC m=+1366.438758727" watchObservedRunningTime="2025-11-24 13:40:54.01029909 +0000 UTC m=+1366.449423590" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.019880 5039 generic.go:334] "Generic (PLEG): container finished" podID="158b7bf7-1207-4509-bb3f-d666847eb59d" containerID="50b846d0dd3fa1f7b3ebc5917ca54f611564534f5d4050a457263515aeadf570" exitCode=0 Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.020011 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-47dzd" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.021722 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-qvrwf" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.021941 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"158b7bf7-1207-4509-bb3f-d666847eb59d","Type":"ContainerDied","Data":"50b846d0dd3fa1f7b3ebc5917ca54f611564534f5d4050a457263515aeadf570"} Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.023253 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-vvjsm" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.116398 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7979dc8455-9nppj"] Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.124432 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7979dc8455-9nppj"] Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.331931 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66af038d-f84a-45bf-ab68-5d1abb9ff0e2" path="/var/lib/kubelet/pods/66af038d-f84a-45bf-ab68-5d1abb9ff0e2/volumes" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.388838 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 13:40:54 crc kubenswrapper[5039]: E1124 13:40:54.389324 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c0313ce-4944-4fad-bce0-47d60b273f69" containerName="heat-db-sync" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.389346 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c0313ce-4944-4fad-bce0-47d60b273f69" containerName="heat-db-sync" Nov 24 13:40:54 crc kubenswrapper[5039]: E1124 13:40:54.389372 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66af038d-f84a-45bf-ab68-5d1abb9ff0e2" containerName="init" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.389380 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="66af038d-f84a-45bf-ab68-5d1abb9ff0e2" containerName="init" Nov 24 13:40:54 crc kubenswrapper[5039]: E1124 13:40:54.389405 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb2d453a-99e6-4593-ad2d-a57c7a2c2519" containerName="cinder-db-sync" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.389414 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb2d453a-99e6-4593-ad2d-a57c7a2c2519" containerName="cinder-db-sync" Nov 24 13:40:54 crc kubenswrapper[5039]: E1124 13:40:54.389435 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c22dae7-e545-4eb0-9552-f3c691f397df" containerName="glance-db-sync" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.389443 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c22dae7-e545-4eb0-9552-f3c691f397df" containerName="glance-db-sync" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.392226 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="66af038d-f84a-45bf-ab68-5d1abb9ff0e2" containerName="init" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.392276 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb2d453a-99e6-4593-ad2d-a57c7a2c2519" containerName="cinder-db-sync" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.392297 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c0313ce-4944-4fad-bce0-47d60b273f69" containerName="heat-db-sync" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.392306 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c22dae7-e545-4eb0-9552-f3c691f397df" containerName="glance-db-sync" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.393748 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.403583 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-cnvk2" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.403870 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.411450 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.411675 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.427238 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.446893 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-789c5c5cb7-k9zn2"] Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.477548 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlkzb\" (UniqueName: \"kubernetes.io/projected/0c658680-b162-434c-9957-74c7c997f6d0-kube-api-access-qlkzb\") pod \"cinder-scheduler-0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " pod="openstack/cinder-scheduler-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.477640 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " pod="openstack/cinder-scheduler-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.477663 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-config-data\") pod \"cinder-scheduler-0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " pod="openstack/cinder-scheduler-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.477687 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " pod="openstack/cinder-scheduler-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.477713 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-scripts\") pod \"cinder-scheduler-0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " pod="openstack/cinder-scheduler-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.477831 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0c658680-b162-434c-9957-74c7c997f6d0-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " pod="openstack/cinder-scheduler-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.520014 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-95d56546f-z7dwq"] Nov 24 13:40:54 crc 
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.528491 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95d56546f-z7dwq"]
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.579748 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0c658680-b162-434c-9957-74c7c997f6d0-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " pod="openstack/cinder-scheduler-0"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.579819 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlkzb\" (UniqueName: \"kubernetes.io/projected/0c658680-b162-434c-9957-74c7c997f6d0-kube-api-access-qlkzb\") pod \"cinder-scheduler-0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " pod="openstack/cinder-scheduler-0"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.579893 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " pod="openstack/cinder-scheduler-0"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.579921 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-ovsdbserver-sb\") pod \"dnsmasq-dns-95d56546f-z7dwq\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " pod="openstack/dnsmasq-dns-95d56546f-z7dwq"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.579951 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-config-data\") pod \"cinder-scheduler-0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " pod="openstack/cinder-scheduler-0"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.579990 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " pod="openstack/cinder-scheduler-0"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.580025 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-scripts\") pod \"cinder-scheduler-0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " pod="openstack/cinder-scheduler-0"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.580080 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-dns-swift-storage-0\") pod \"dnsmasq-dns-95d56546f-z7dwq\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " pod="openstack/dnsmasq-dns-95d56546f-z7dwq"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.580156 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-dns-svc\") pod \"dnsmasq-dns-95d56546f-z7dwq\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " pod="openstack/dnsmasq-dns-95d56546f-z7dwq"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.580190 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-ovsdbserver-nb\") pod \"dnsmasq-dns-95d56546f-z7dwq\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " pod="openstack/dnsmasq-dns-95d56546f-z7dwq"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.580241 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-config\") pod \"dnsmasq-dns-95d56546f-z7dwq\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " pod="openstack/dnsmasq-dns-95d56546f-z7dwq"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.580281 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d59lr\" (UniqueName: \"kubernetes.io/projected/0082e16b-213f-4995-b96c-da1c3634191e-kube-api-access-d59lr\") pod \"dnsmasq-dns-95d56546f-z7dwq\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " pod="openstack/dnsmasq-dns-95d56546f-z7dwq"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.580419 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0c658680-b162-434c-9957-74c7c997f6d0-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " pod="openstack/cinder-scheduler-0"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.589384 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " pod="openstack/cinder-scheduler-0"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.597733 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " pod="openstack/cinder-scheduler-0"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.619194 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-config-data\") pod \"cinder-scheduler-0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " pod="openstack/cinder-scheduler-0"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.621070 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-scripts\") pod \"cinder-scheduler-0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " pod="openstack/cinder-scheduler-0"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.630369 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlkzb\" (UniqueName: \"kubernetes.io/projected/0c658680-b162-434c-9957-74c7c997f6d0-kube-api-access-qlkzb\") pod \"cinder-scheduler-0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " pod="openstack/cinder-scheduler-0"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.682598 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95d56546f-z7dwq"]
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.684852 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-dns-swift-storage-0\") pod \"dnsmasq-dns-95d56546f-z7dwq\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " pod="openstack/dnsmasq-dns-95d56546f-z7dwq"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.684915 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-dns-svc\") pod \"dnsmasq-dns-95d56546f-z7dwq\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " pod="openstack/dnsmasq-dns-95d56546f-z7dwq"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.684939 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-ovsdbserver-nb\") pod \"dnsmasq-dns-95d56546f-z7dwq\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " pod="openstack/dnsmasq-dns-95d56546f-z7dwq"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.684977 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-config\") pod \"dnsmasq-dns-95d56546f-z7dwq\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " pod="openstack/dnsmasq-dns-95d56546f-z7dwq"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.685002 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d59lr\" (UniqueName: \"kubernetes.io/projected/0082e16b-213f-4995-b96c-da1c3634191e-kube-api-access-d59lr\") pod \"dnsmasq-dns-95d56546f-z7dwq\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " pod="openstack/dnsmasq-dns-95d56546f-z7dwq"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.685067 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-ovsdbserver-sb\") pod \"dnsmasq-dns-95d56546f-z7dwq\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " pod="openstack/dnsmasq-dns-95d56546f-z7dwq"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.685995 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-ovsdbserver-sb\") pod \"dnsmasq-dns-95d56546f-z7dwq\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " pod="openstack/dnsmasq-dns-95d56546f-z7dwq"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.687948 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-ovsdbserver-nb\") pod \"dnsmasq-dns-95d56546f-z7dwq\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " pod="openstack/dnsmasq-dns-95d56546f-z7dwq"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.688709 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-dns-svc\") pod \"dnsmasq-dns-95d56546f-z7dwq\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " pod="openstack/dnsmasq-dns-95d56546f-z7dwq"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.691389 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-config\") pod \"dnsmasq-dns-95d56546f-z7dwq\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " pod="openstack/dnsmasq-dns-95d56546f-z7dwq"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.691857 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-dns-swift-storage-0\") pod \"dnsmasq-dns-95d56546f-z7dwq\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " pod="openstack/dnsmasq-dns-95d56546f-z7dwq"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.695738 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.697916 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.709962 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-p8jd7"]
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.711948 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-p8jd7"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.713880 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.729628 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d59lr\" (UniqueName: \"kubernetes.io/projected/0082e16b-213f-4995-b96c-da1c3634191e-kube-api-access-d59lr\") pod \"dnsmasq-dns-95d56546f-z7dwq\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " pod="openstack/dnsmasq-dns-95d56546f-z7dwq"
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.733606 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.744165 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-p8jd7"]
Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.759573 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.787804 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-scripts\") pod \"cinder-api-0\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.787847 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-p8jd7\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.787873 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-config-data-custom\") pod \"cinder-api-0\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.787899 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-p8jd7\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.787931 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxrv7\" (UniqueName: \"kubernetes.io/projected/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-kube-api-access-nxrv7\") pod \"cinder-api-0\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.788040 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nz25p\" (UniqueName: \"kubernetes.io/projected/0840f945-89d7-40d1-b8dc-629d32793a6c-kube-api-access-nz25p\") pod \"dnsmasq-dns-5784cf869f-p8jd7\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.788144 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-p8jd7\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.788166 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-config-data\") pod \"cinder-api-0\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.788181 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-etc-machine-id\") pod \"cinder-api-0\" (UID: 
\"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.788251 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-dns-svc\") pod \"dnsmasq-dns-5784cf869f-p8jd7\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.788350 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.788421 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-config\") pod \"dnsmasq-dns-5784cf869f-p8jd7\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.788438 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-logs\") pod \"cinder-api-0\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.799491 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95d56546f-z7dwq" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.897557 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-p8jd7\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.897610 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-etc-machine-id\") pod \"cinder-api-0\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.897627 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-config-data\") pod \"cinder-api-0\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.897645 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-dns-svc\") pod \"dnsmasq-dns-5784cf869f-p8jd7\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.897667 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-combined-ca-bundle\") pod \"cinder-api-0\" (UID: 
\"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.897711 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-etc-machine-id\") pod \"cinder-api-0\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.897723 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-config\") pod \"dnsmasq-dns-5784cf869f-p8jd7\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.897766 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-logs\") pod \"cinder-api-0\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.897813 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-scripts\") pod \"cinder-api-0\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.897837 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-p8jd7\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.897864 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-config-data-custom\") pod \"cinder-api-0\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.897898 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-p8jd7\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.897948 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxrv7\" (UniqueName: \"kubernetes.io/projected/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-kube-api-access-nxrv7\") pod \"cinder-api-0\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.897966 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nz25p\" (UniqueName: \"kubernetes.io/projected/0840f945-89d7-40d1-b8dc-629d32793a6c-kube-api-access-nz25p\") pod \"dnsmasq-dns-5784cf869f-p8jd7\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.898407 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" 
(UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-p8jd7\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.898537 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-dns-svc\") pod \"dnsmasq-dns-5784cf869f-p8jd7\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.901268 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-logs\") pod \"cinder-api-0\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.903819 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-config-data\") pod \"cinder-api-0\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.904053 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.904931 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-config\") pod \"dnsmasq-dns-5784cf869f-p8jd7\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.905068 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-scripts\") pod \"cinder-api-0\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.906979 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-config-data-custom\") pod \"cinder-api-0\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.907372 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-p8jd7\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.908634 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-p8jd7\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.916480 5039 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-nz25p\" (UniqueName: \"kubernetes.io/projected/0840f945-89d7-40d1-b8dc-629d32793a6c-kube-api-access-nz25p\") pod \"dnsmasq-dns-5784cf869f-p8jd7\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:40:54 crc kubenswrapper[5039]: I1124 13:40:54.923183 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxrv7\" (UniqueName: \"kubernetes.io/projected/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-kube-api-access-nxrv7\") pod \"cinder-api-0\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " pod="openstack/cinder-api-0" Nov 24 13:40:55 crc kubenswrapper[5039]: I1124 13:40:55.118485 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 24 13:40:55 crc kubenswrapper[5039]: I1124 13:40:55.132406 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:40:55 crc kubenswrapper[5039]: I1124 13:40:55.929748 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:40:55 crc kubenswrapper[5039]: I1124 13:40:55.965082 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95d56546f-z7dwq"] Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.020285 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-config-data\") pod \"158b7bf7-1207-4509-bb3f-d666847eb59d\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.020363 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-scripts\") pod \"158b7bf7-1207-4509-bb3f-d666847eb59d\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.020450 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-combined-ca-bundle\") pod \"158b7bf7-1207-4509-bb3f-d666847eb59d\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.020528 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/158b7bf7-1207-4509-bb3f-d666847eb59d-log-httpd\") pod \"158b7bf7-1207-4509-bb3f-d666847eb59d\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.020561 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/158b7bf7-1207-4509-bb3f-d666847eb59d-run-httpd\") pod \"158b7bf7-1207-4509-bb3f-d666847eb59d\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.020608 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vc7l\" (UniqueName: \"kubernetes.io/projected/158b7bf7-1207-4509-bb3f-d666847eb59d-kube-api-access-8vc7l\") pod \"158b7bf7-1207-4509-bb3f-d666847eb59d\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.020670 5039 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-sg-core-conf-yaml\") pod \"158b7bf7-1207-4509-bb3f-d666847eb59d\" (UID: \"158b7bf7-1207-4509-bb3f-d666847eb59d\") " Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.021959 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/158b7bf7-1207-4509-bb3f-d666847eb59d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "158b7bf7-1207-4509-bb3f-d666847eb59d" (UID: "158b7bf7-1207-4509-bb3f-d666847eb59d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.022156 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/158b7bf7-1207-4509-bb3f-d666847eb59d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "158b7bf7-1207-4509-bb3f-d666847eb59d" (UID: "158b7bf7-1207-4509-bb3f-d666847eb59d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.027445 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-scripts" (OuterVolumeSpecName: "scripts") pod "158b7bf7-1207-4509-bb3f-d666847eb59d" (UID: "158b7bf7-1207-4509-bb3f-d666847eb59d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.027552 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/158b7bf7-1207-4509-bb3f-d666847eb59d-kube-api-access-8vc7l" (OuterVolumeSpecName: "kube-api-access-8vc7l") pod "158b7bf7-1207-4509-bb3f-d666847eb59d" (UID: "158b7bf7-1207-4509-bb3f-d666847eb59d"). InnerVolumeSpecName "kube-api-access-8vc7l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.051651 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" event={"ID":"df3df8f8-f89f-4eab-98af-d7dd6cfe17da","Type":"ContainerStarted","Data":"553a9f351583dd521de1b99892710245e1a94d0ad318e79a6bcf055c25c7b7b0"} Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.057465 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" event={"ID":"626a5ed2-5dc0-47f1-af4c-f4f21adfda35","Type":"ContainerStarted","Data":"16cff593891733afa678682d0b9b4dba6aa6d1a2c2e8ed114c8f1ac57e2564aa"} Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.057535 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" podUID="626a5ed2-5dc0-47f1-af4c-f4f21adfda35" containerName="dnsmasq-dns" containerID="cri-o://16cff593891733afa678682d0b9b4dba6aa6d1a2c2e8ed114c8f1ac57e2564aa" gracePeriod=10 Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.057760 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.069617 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "158b7bf7-1207-4509-bb3f-d666847eb59d" (UID: "158b7bf7-1207-4509-bb3f-d666847eb59d"). 
InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.070184 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c4b94795b-c6c2f" event={"ID":"7fe0bf4f-b6f8-48c0-b772-587a715e6c27","Type":"ContainerStarted","Data":"eb2f7267049802178c3dd0c7578422d8ddad0e06220f23d0572624afbd97cc53"} Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.070227 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c4b94795b-c6c2f" event={"ID":"7fe0bf4f-b6f8-48c0-b772-587a715e6c27","Type":"ContainerStarted","Data":"8437e26cb6893af74d085f476e072984c99df391d9ee19a4d901cea208fdde73"} Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.070298 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-c4b94795b-c6c2f" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.074581 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95d56546f-z7dwq" event={"ID":"0082e16b-213f-4995-b96c-da1c3634191e","Type":"ContainerStarted","Data":"42fc6a793a61b9f5bcb0d6b2443dea67ae9ffba6af96a8c7181ef1f5cb5d8cf6"} Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.094528 5039 generic.go:334] "Generic (PLEG): container finished" podID="158b7bf7-1207-4509-bb3f-d666847eb59d" containerID="8fc8f7a586d558f2f11d83f27837cff6495aeae1ae5fbea7c4dca0f303501b07" exitCode=0 Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.098829 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.099432 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"158b7bf7-1207-4509-bb3f-d666847eb59d","Type":"ContainerDied","Data":"8fc8f7a586d558f2f11d83f27837cff6495aeae1ae5fbea7c4dca0f303501b07"} Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.099721 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"158b7bf7-1207-4509-bb3f-d666847eb59d","Type":"ContainerDied","Data":"92ad996d731c70cc5d910d40f7117e4e8ffc1551bf2171368d38eba44f984c16"} Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.099762 5039 scope.go:117] "RemoveContainer" containerID="15134f683af8e079b3b63a39bb9b45b623759db0f04ab5e5a95d5b08bdc8d70c" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.127004 5039 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.127046 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.127069 5039 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/158b7bf7-1207-4509-bb3f-d666847eb59d-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.127083 5039 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/158b7bf7-1207-4509-bb3f-d666847eb59d-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.127096 5039 reconciler_common.go:293] "Volume detached for 
volume \"kube-api-access-8vc7l\" (UniqueName: \"kubernetes.io/projected/158b7bf7-1207-4509-bb3f-d666847eb59d-kube-api-access-8vc7l\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.135113 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" podStartSLOduration=4.135091445 podStartE2EDuration="4.135091445s" podCreationTimestamp="2025-11-24 13:40:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:40:56.077855403 +0000 UTC m=+1368.516979903" watchObservedRunningTime="2025-11-24 13:40:56.135091445 +0000 UTC m=+1368.574215935" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.153447 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-c4b94795b-c6c2f" podStartSLOduration=4.153429997 podStartE2EDuration="4.153429997s" podCreationTimestamp="2025-11-24 13:40:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:40:56.099096397 +0000 UTC m=+1368.538220917" watchObservedRunningTime="2025-11-24 13:40:56.153429997 +0000 UTC m=+1368.592554487" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.194390 5039 scope.go:117] "RemoveContainer" containerID="ae8cd0912319598871420e063001336b50c3773c4c4d4bd25ccf57e0419f0a2c" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.210090 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "158b7bf7-1207-4509-bb3f-d666847eb59d" (UID: "158b7bf7-1207-4509-bb3f-d666847eb59d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.231584 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.269275 5039 scope.go:117] "RemoveContainer" containerID="8fc8f7a586d558f2f11d83f27837cff6495aeae1ae5fbea7c4dca0f303501b07" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.273121 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-config-data" (OuterVolumeSpecName: "config-data") pod "158b7bf7-1207-4509-bb3f-d666847eb59d" (UID: "158b7bf7-1207-4509-bb3f-d666847eb59d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.301444 5039 scope.go:117] "RemoveContainer" containerID="50b846d0dd3fa1f7b3ebc5917ca54f611564534f5d4050a457263515aeadf570" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.335099 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/158b7bf7-1207-4509-bb3f-d666847eb59d-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.340877 5039 scope.go:117] "RemoveContainer" containerID="15134f683af8e079b3b63a39bb9b45b623759db0f04ab5e5a95d5b08bdc8d70c" Nov 24 13:40:56 crc kubenswrapper[5039]: E1124 13:40:56.345490 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"15134f683af8e079b3b63a39bb9b45b623759db0f04ab5e5a95d5b08bdc8d70c\": container with ID starting with 15134f683af8e079b3b63a39bb9b45b623759db0f04ab5e5a95d5b08bdc8d70c not found: ID does not exist" containerID="15134f683af8e079b3b63a39bb9b45b623759db0f04ab5e5a95d5b08bdc8d70c" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.345535 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15134f683af8e079b3b63a39bb9b45b623759db0f04ab5e5a95d5b08bdc8d70c"} err="failed to get container status \"15134f683af8e079b3b63a39bb9b45b623759db0f04ab5e5a95d5b08bdc8d70c\": rpc error: code = NotFound desc = could not find container \"15134f683af8e079b3b63a39bb9b45b623759db0f04ab5e5a95d5b08bdc8d70c\": container with ID starting with 15134f683af8e079b3b63a39bb9b45b623759db0f04ab5e5a95d5b08bdc8d70c not found: ID does not exist" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.345554 5039 scope.go:117] "RemoveContainer" containerID="ae8cd0912319598871420e063001336b50c3773c4c4d4bd25ccf57e0419f0a2c" Nov 24 13:40:56 crc kubenswrapper[5039]: E1124 13:40:56.346154 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae8cd0912319598871420e063001336b50c3773c4c4d4bd25ccf57e0419f0a2c\": container with ID starting with ae8cd0912319598871420e063001336b50c3773c4c4d4bd25ccf57e0419f0a2c not found: ID does not exist" containerID="ae8cd0912319598871420e063001336b50c3773c4c4d4bd25ccf57e0419f0a2c" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.346172 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae8cd0912319598871420e063001336b50c3773c4c4d4bd25ccf57e0419f0a2c"} err="failed to get container status \"ae8cd0912319598871420e063001336b50c3773c4c4d4bd25ccf57e0419f0a2c\": rpc error: code = NotFound desc = could not find container \"ae8cd0912319598871420e063001336b50c3773c4c4d4bd25ccf57e0419f0a2c\": container with ID starting with ae8cd0912319598871420e063001336b50c3773c4c4d4bd25ccf57e0419f0a2c not found: ID does not exist" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.346185 5039 scope.go:117] "RemoveContainer" containerID="8fc8f7a586d558f2f11d83f27837cff6495aeae1ae5fbea7c4dca0f303501b07" Nov 24 13:40:56 crc kubenswrapper[5039]: E1124 13:40:56.347006 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8fc8f7a586d558f2f11d83f27837cff6495aeae1ae5fbea7c4dca0f303501b07\": container with ID starting with 8fc8f7a586d558f2f11d83f27837cff6495aeae1ae5fbea7c4dca0f303501b07 not found: ID does not exist" 
containerID="8fc8f7a586d558f2f11d83f27837cff6495aeae1ae5fbea7c4dca0f303501b07" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.347047 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8fc8f7a586d558f2f11d83f27837cff6495aeae1ae5fbea7c4dca0f303501b07"} err="failed to get container status \"8fc8f7a586d558f2f11d83f27837cff6495aeae1ae5fbea7c4dca0f303501b07\": rpc error: code = NotFound desc = could not find container \"8fc8f7a586d558f2f11d83f27837cff6495aeae1ae5fbea7c4dca0f303501b07\": container with ID starting with 8fc8f7a586d558f2f11d83f27837cff6495aeae1ae5fbea7c4dca0f303501b07 not found: ID does not exist" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.347060 5039 scope.go:117] "RemoveContainer" containerID="50b846d0dd3fa1f7b3ebc5917ca54f611564534f5d4050a457263515aeadf570" Nov 24 13:40:56 crc kubenswrapper[5039]: E1124 13:40:56.347310 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"50b846d0dd3fa1f7b3ebc5917ca54f611564534f5d4050a457263515aeadf570\": container with ID starting with 50b846d0dd3fa1f7b3ebc5917ca54f611564534f5d4050a457263515aeadf570 not found: ID does not exist" containerID="50b846d0dd3fa1f7b3ebc5917ca54f611564534f5d4050a457263515aeadf570" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.347330 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50b846d0dd3fa1f7b3ebc5917ca54f611564534f5d4050a457263515aeadf570"} err="failed to get container status \"50b846d0dd3fa1f7b3ebc5917ca54f611564534f5d4050a457263515aeadf570\": rpc error: code = NotFound desc = could not find container \"50b846d0dd3fa1f7b3ebc5917ca54f611564534f5d4050a457263515aeadf570\": container with ID starting with 50b846d0dd3fa1f7b3ebc5917ca54f611564534f5d4050a457263515aeadf570 not found: ID does not exist" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.398942 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-p8jd7"] Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.413377 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.424775 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 24 13:40:56 crc kubenswrapper[5039]: W1124 13:40:56.443822 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbcfabb1c_c5cb_493e_bf10_e4d2245804c4.slice/crio-21c1ad017c85fd158d4dac75cbbf6b85558fd68e315b13670eb762d967bf11ff WatchSource:0}: Error finding container 21c1ad017c85fd158d4dac75cbbf6b85558fd68e315b13670eb762d967bf11ff: Status 404 returned error can't find the container with id 21c1ad017c85fd158d4dac75cbbf6b85558fd68e315b13670eb762d967bf11ff Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.457929 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.478677 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.489923 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:40:56 crc kubenswrapper[5039]: E1124 13:40:56.490630 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="158b7bf7-1207-4509-bb3f-d666847eb59d" 
containerName="ceilometer-central-agent" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.490648 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="158b7bf7-1207-4509-bb3f-d666847eb59d" containerName="ceilometer-central-agent" Nov 24 13:40:56 crc kubenswrapper[5039]: E1124 13:40:56.490683 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="158b7bf7-1207-4509-bb3f-d666847eb59d" containerName="proxy-httpd" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.490688 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="158b7bf7-1207-4509-bb3f-d666847eb59d" containerName="proxy-httpd" Nov 24 13:40:56 crc kubenswrapper[5039]: E1124 13:40:56.490704 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="158b7bf7-1207-4509-bb3f-d666847eb59d" containerName="ceilometer-notification-agent" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.490712 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="158b7bf7-1207-4509-bb3f-d666847eb59d" containerName="ceilometer-notification-agent" Nov 24 13:40:56 crc kubenswrapper[5039]: E1124 13:40:56.490723 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="158b7bf7-1207-4509-bb3f-d666847eb59d" containerName="sg-core" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.490729 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="158b7bf7-1207-4509-bb3f-d666847eb59d" containerName="sg-core" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.490938 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="158b7bf7-1207-4509-bb3f-d666847eb59d" containerName="proxy-httpd" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.490949 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="158b7bf7-1207-4509-bb3f-d666847eb59d" containerName="sg-core" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.490964 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="158b7bf7-1207-4509-bb3f-d666847eb59d" containerName="ceilometer-notification-agent" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.490980 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="158b7bf7-1207-4509-bb3f-d666847eb59d" containerName="ceilometer-central-agent" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.505369 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.505465 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.507878 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.514589 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.640193 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-scripts\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.640260 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.640293 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-config-data\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.640335 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb35c10f-5f1f-4175-9174-4696bada484a-log-httpd\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.640364 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb35c10f-5f1f-4175-9174-4696bada484a-run-httpd\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.640431 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.640449 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g27qb\" (UniqueName: \"kubernetes.io/projected/bb35c10f-5f1f-4175-9174-4696bada484a-kube-api-access-g27qb\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.741968 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-scripts\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.742022 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.742060 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-config-data\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.742117 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb35c10f-5f1f-4175-9174-4696bada484a-log-httpd\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.742156 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb35c10f-5f1f-4175-9174-4696bada484a-run-httpd\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.742255 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.742276 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g27qb\" (UniqueName: \"kubernetes.io/projected/bb35c10f-5f1f-4175-9174-4696bada484a-kube-api-access-g27qb\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.743491 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb35c10f-5f1f-4175-9174-4696bada484a-log-httpd\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.744059 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb35c10f-5f1f-4175-9174-4696bada484a-run-httpd\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.748711 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.749437 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-config-data\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.750706 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.754372 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-scripts\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.764261 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g27qb\" (UniqueName: \"kubernetes.io/projected/bb35c10f-5f1f-4175-9174-4696bada484a-kube-api-access-g27qb\") pod \"ceilometer-0\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " pod="openstack/ceilometer-0" Nov 24 13:40:56 crc kubenswrapper[5039]: I1124 13:40:56.836949 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.002868 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.150236 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-dns-svc\") pod \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.150677 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-dns-swift-storage-0\") pod \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.150731 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqlvn\" (UniqueName: \"kubernetes.io/projected/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-kube-api-access-cqlvn\") pod \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.150752 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-ovsdbserver-nb\") pod \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.150773 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-ovsdbserver-sb\") pod \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.150863 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-config\") pod \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\" (UID: \"626a5ed2-5dc0-47f1-af4c-f4f21adfda35\") " Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.158757 5039 generic.go:334] "Generic (PLEG): container finished" podID="0082e16b-213f-4995-b96c-da1c3634191e" 
containerID="514d7b4666b2a7036d2a48eff2955a8bfd54244243fc0ee866911d94f2cd372d" exitCode=0 Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.158846 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95d56546f-z7dwq" event={"ID":"0082e16b-213f-4995-b96c-da1c3634191e","Type":"ContainerDied","Data":"514d7b4666b2a7036d2a48eff2955a8bfd54244243fc0ee866911d94f2cd372d"} Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.178684 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-kube-api-access-cqlvn" (OuterVolumeSpecName: "kube-api-access-cqlvn") pod "626a5ed2-5dc0-47f1-af4c-f4f21adfda35" (UID: "626a5ed2-5dc0-47f1-af4c-f4f21adfda35"). InnerVolumeSpecName "kube-api-access-cqlvn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.253061 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqlvn\" (UniqueName: \"kubernetes.io/projected/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-kube-api-access-cqlvn\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.253857 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "626a5ed2-5dc0-47f1-af4c-f4f21adfda35" (UID: "626a5ed2-5dc0-47f1-af4c-f4f21adfda35"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.258986 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7f697665cf-n6vcs" event={"ID":"1228dfc2-bfeb-4ba9-b0f8-ac276a2207be","Type":"ContainerStarted","Data":"9fb4d87af0370e1388b1a6e6327c34f5b83463d867db0d1c21e88fa95211c277"} Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.259033 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7f697665cf-n6vcs" event={"ID":"1228dfc2-bfeb-4ba9-b0f8-ac276a2207be","Type":"ContainerStarted","Data":"e7d75ef3c153ddc4853656dbf2433407717aede0c9fa4c9595c85a0b00497bbb"} Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.259483 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-config" (OuterVolumeSpecName: "config") pod "626a5ed2-5dc0-47f1-af4c-f4f21adfda35" (UID: "626a5ed2-5dc0-47f1-af4c-f4f21adfda35"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.265013 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" event={"ID":"0840f945-89d7-40d1-b8dc-629d32793a6c","Type":"ContainerStarted","Data":"e9db6f74f933c7a73061adadbe8e9f0a39dfbd1b2111ae82b5c5f525146970eb"} Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.286694 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-7f697665cf-n6vcs" podStartSLOduration=3.761001721 podStartE2EDuration="7.286672576s" podCreationTimestamp="2025-11-24 13:40:50 +0000 UTC" firstStartedPulling="2025-11-24 13:40:51.855240669 +0000 UTC m=+1364.294365169" lastFinishedPulling="2025-11-24 13:40:55.380911524 +0000 UTC m=+1367.820036024" observedRunningTime="2025-11-24 13:40:57.283208061 +0000 UTC m=+1369.722332561" watchObservedRunningTime="2025-11-24 13:40:57.286672576 +0000 UTC m=+1369.725797086" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.287126 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bcfabb1c-c5cb-493e-bf10-e4d2245804c4","Type":"ContainerStarted","Data":"21c1ad017c85fd158d4dac75cbbf6b85558fd68e315b13670eb762d967bf11ff"} Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.294214 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" event={"ID":"df3df8f8-f89f-4eab-98af-d7dd6cfe17da","Type":"ContainerStarted","Data":"2e7e587900fc3c980b377a519acf9d41b37f71a57d869fd9a59e02e526fa9381"} Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.333858 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-746f5fd69d-mww4x" podStartSLOduration=3.765285457 podStartE2EDuration="7.33384009s" podCreationTimestamp="2025-11-24 13:40:50 +0000 UTC" firstStartedPulling="2025-11-24 13:40:51.732901971 +0000 UTC m=+1364.172026471" lastFinishedPulling="2025-11-24 13:40:55.301456614 +0000 UTC m=+1367.740581104" observedRunningTime="2025-11-24 13:40:57.327707798 +0000 UTC m=+1369.766832318" watchObservedRunningTime="2025-11-24 13:40:57.33384009 +0000 UTC m=+1369.772964590" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.335802 5039 generic.go:334] "Generic (PLEG): container finished" podID="626a5ed2-5dc0-47f1-af4c-f4f21adfda35" containerID="16cff593891733afa678682d0b9b4dba6aa6d1a2c2e8ed114c8f1ac57e2564aa" exitCode=0 Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.335911 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.335929 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" event={"ID":"626a5ed2-5dc0-47f1-af4c-f4f21adfda35","Type":"ContainerDied","Data":"16cff593891733afa678682d0b9b4dba6aa6d1a2c2e8ed114c8f1ac57e2564aa"} Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.335963 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-789c5c5cb7-k9zn2" event={"ID":"626a5ed2-5dc0-47f1-af4c-f4f21adfda35","Type":"ContainerDied","Data":"e6bc809900efb05a8176e8208687b61d45add3897a640f7cacbd86be37bdfcdb"} Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.335984 5039 scope.go:117] "RemoveContainer" containerID="16cff593891733afa678682d0b9b4dba6aa6d1a2c2e8ed114c8f1ac57e2564aa" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.344382 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "626a5ed2-5dc0-47f1-af4c-f4f21adfda35" (UID: "626a5ed2-5dc0-47f1-af4c-f4f21adfda35"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.350890 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0c658680-b162-434c-9957-74c7c997f6d0","Type":"ContainerStarted","Data":"6b8fd292c2e1cde7b536c33e354f458779c27c8a87e40f4c6630c2d87d78a1e2"} Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.359180 5039 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.359217 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.359229 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.367819 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "626a5ed2-5dc0-47f1-af4c-f4f21adfda35" (UID: "626a5ed2-5dc0-47f1-af4c-f4f21adfda35"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.390818 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "626a5ed2-5dc0-47f1-af4c-f4f21adfda35" (UID: "626a5ed2-5dc0-47f1-af4c-f4f21adfda35"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.463558 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.463596 5039 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/626a5ed2-5dc0-47f1-af4c-f4f21adfda35-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.518895 5039 scope.go:117] "RemoveContainer" containerID="aedbf933edbc8d7614313e85e958b5f9897454a02d0abd206ff9be55ce257fed" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.601362 5039 scope.go:117] "RemoveContainer" containerID="16cff593891733afa678682d0b9b4dba6aa6d1a2c2e8ed114c8f1ac57e2564aa" Nov 24 13:40:57 crc kubenswrapper[5039]: E1124 13:40:57.601751 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16cff593891733afa678682d0b9b4dba6aa6d1a2c2e8ed114c8f1ac57e2564aa\": container with ID starting with 16cff593891733afa678682d0b9b4dba6aa6d1a2c2e8ed114c8f1ac57e2564aa not found: ID does not exist" containerID="16cff593891733afa678682d0b9b4dba6aa6d1a2c2e8ed114c8f1ac57e2564aa" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.601776 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16cff593891733afa678682d0b9b4dba6aa6d1a2c2e8ed114c8f1ac57e2564aa"} err="failed to get container status \"16cff593891733afa678682d0b9b4dba6aa6d1a2c2e8ed114c8f1ac57e2564aa\": rpc error: code = NotFound desc = could not find container \"16cff593891733afa678682d0b9b4dba6aa6d1a2c2e8ed114c8f1ac57e2564aa\": container with ID starting with 16cff593891733afa678682d0b9b4dba6aa6d1a2c2e8ed114c8f1ac57e2564aa not found: ID does not exist" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.601795 5039 scope.go:117] "RemoveContainer" containerID="aedbf933edbc8d7614313e85e958b5f9897454a02d0abd206ff9be55ce257fed" Nov 24 13:40:57 crc kubenswrapper[5039]: E1124 13:40:57.601980 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aedbf933edbc8d7614313e85e958b5f9897454a02d0abd206ff9be55ce257fed\": container with ID starting with aedbf933edbc8d7614313e85e958b5f9897454a02d0abd206ff9be55ce257fed not found: ID does not exist" containerID="aedbf933edbc8d7614313e85e958b5f9897454a02d0abd206ff9be55ce257fed" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.601997 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aedbf933edbc8d7614313e85e958b5f9897454a02d0abd206ff9be55ce257fed"} err="failed to get container status \"aedbf933edbc8d7614313e85e958b5f9897454a02d0abd206ff9be55ce257fed\": rpc error: code = NotFound desc = could not find container \"aedbf933edbc8d7614313e85e958b5f9897454a02d0abd206ff9be55ce257fed\": container with ID starting with aedbf933edbc8d7614313e85e958b5f9897454a02d0abd206ff9be55ce257fed not found: ID does not exist" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.678167 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.901901 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-95d56546f-z7dwq" Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.921561 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-789c5c5cb7-k9zn2"] Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.935724 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-789c5c5cb7-k9zn2"] Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.973063 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-dns-svc\") pod \"0082e16b-213f-4995-b96c-da1c3634191e\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.973149 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-ovsdbserver-nb\") pod \"0082e16b-213f-4995-b96c-da1c3634191e\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.973256 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-ovsdbserver-sb\") pod \"0082e16b-213f-4995-b96c-da1c3634191e\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.973276 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-config\") pod \"0082e16b-213f-4995-b96c-da1c3634191e\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.973360 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d59lr\" (UniqueName: \"kubernetes.io/projected/0082e16b-213f-4995-b96c-da1c3634191e-kube-api-access-d59lr\") pod \"0082e16b-213f-4995-b96c-da1c3634191e\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.973395 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-dns-swift-storage-0\") pod \"0082e16b-213f-4995-b96c-da1c3634191e\" (UID: \"0082e16b-213f-4995-b96c-da1c3634191e\") " Nov 24 13:40:57 crc kubenswrapper[5039]: I1124 13:40:57.989813 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0082e16b-213f-4995-b96c-da1c3634191e-kube-api-access-d59lr" (OuterVolumeSpecName: "kube-api-access-d59lr") pod "0082e16b-213f-4995-b96c-da1c3634191e" (UID: "0082e16b-213f-4995-b96c-da1c3634191e"). InnerVolumeSpecName "kube-api-access-d59lr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.001186 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0082e16b-213f-4995-b96c-da1c3634191e" (UID: "0082e16b-213f-4995-b96c-da1c3634191e"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.002609 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0082e16b-213f-4995-b96c-da1c3634191e" (UID: "0082e16b-213f-4995-b96c-da1c3634191e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.003284 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0082e16b-213f-4995-b96c-da1c3634191e" (UID: "0082e16b-213f-4995-b96c-da1c3634191e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.003855 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0082e16b-213f-4995-b96c-da1c3634191e" (UID: "0082e16b-213f-4995-b96c-da1c3634191e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.011047 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-config" (OuterVolumeSpecName: "config") pod "0082e16b-213f-4995-b96c-da1c3634191e" (UID: "0082e16b-213f-4995-b96c-da1c3634191e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.074985 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.075012 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.075022 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d59lr\" (UniqueName: \"kubernetes.io/projected/0082e16b-213f-4995-b96c-da1c3634191e-kube-api-access-d59lr\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.075032 5039 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.075043 5039 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.075051 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0082e16b-213f-4995-b96c-da1c3634191e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.351050 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="158b7bf7-1207-4509-bb3f-d666847eb59d" path="/var/lib/kubelet/pods/158b7bf7-1207-4509-bb3f-d666847eb59d/volumes" Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.358733 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="626a5ed2-5dc0-47f1-af4c-f4f21adfda35" path="/var/lib/kubelet/pods/626a5ed2-5dc0-47f1-af4c-f4f21adfda35/volumes" Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.418702 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb35c10f-5f1f-4175-9174-4696bada484a","Type":"ContainerStarted","Data":"383cd41ac44c200f38ed0cbe4d9b94ac1b1944385a3ebaa0c236b92672a41e0d"} Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.422881 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95d56546f-z7dwq" Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.423071 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95d56546f-z7dwq" event={"ID":"0082e16b-213f-4995-b96c-da1c3634191e","Type":"ContainerDied","Data":"42fc6a793a61b9f5bcb0d6b2443dea67ae9ffba6af96a8c7181ef1f5cb5d8cf6"} Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.423139 5039 scope.go:117] "RemoveContainer" containerID="514d7b4666b2a7036d2a48eff2955a8bfd54244243fc0ee866911d94f2cd372d" Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.429329 5039 generic.go:334] "Generic (PLEG): container finished" podID="0840f945-89d7-40d1-b8dc-629d32793a6c" containerID="d3a6a7d78e5f185a361db46d2d8e366e428a7610066b106ac68d1fd802dbab76" exitCode=0 Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.429396 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" event={"ID":"0840f945-89d7-40d1-b8dc-629d32793a6c","Type":"ContainerDied","Data":"d3a6a7d78e5f185a361db46d2d8e366e428a7610066b106ac68d1fd802dbab76"} Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.432755 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bcfabb1c-c5cb-493e-bf10-e4d2245804c4","Type":"ContainerStarted","Data":"635a6b4b5a5fc1a2b6be311aa390c601e5944087fbaf82d16c92069216e0b0b5"} Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.517478 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95d56546f-z7dwq"] Nov 24 13:40:58 crc kubenswrapper[5039]: I1124 13:40:58.529726 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-95d56546f-z7dwq"] Nov 24 13:40:59 crc kubenswrapper[5039]: I1124 13:40:59.444340 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bcfabb1c-c5cb-493e-bf10-e4d2245804c4","Type":"ContainerStarted","Data":"22529c43a0fc26d4147908227de33458db183d0e30bea0d8de09f4636c1f8516"} Nov 24 13:40:59 crc kubenswrapper[5039]: I1124 13:40:59.445612 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 24 13:40:59 crc kubenswrapper[5039]: I1124 13:40:59.446239 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb35c10f-5f1f-4175-9174-4696bada484a","Type":"ContainerStarted","Data":"7f876b92196080d7a8a1b72cb590a7aa6142c21222f40983a1db923eeee0aec2"} Nov 24 13:40:59 crc kubenswrapper[5039]: I1124 13:40:59.448094 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"0c658680-b162-434c-9957-74c7c997f6d0","Type":"ContainerStarted","Data":"ed41ca9709a824f17ac7b9d9e394ae840b7a1126682caf22b4b8297118fed767"} Nov 24 13:40:59 crc kubenswrapper[5039]: I1124 13:40:59.462787 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=5.462769726 podStartE2EDuration="5.462769726s" podCreationTimestamp="2025-11-24 13:40:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:40:59.460571642 +0000 UTC m=+1371.899696152" watchObservedRunningTime="2025-11-24 13:40:59.462769726 +0000 UTC m=+1371.901894226" Nov 24 13:41:00 crc kubenswrapper[5039]: I1124 13:41:00.323288 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0082e16b-213f-4995-b96c-da1c3634191e" path="/var/lib/kubelet/pods/0082e16b-213f-4995-b96c-da1c3634191e/volumes" Nov 24 13:41:01 crc kubenswrapper[5039]: I1124 13:41:01.494066 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" event={"ID":"0840f945-89d7-40d1-b8dc-629d32793a6c","Type":"ContainerStarted","Data":"5c51c7415cbccf2387a2f7b1fa7d06b5a86ecb4b13d32e014a92b97de1f692d2"} Nov 24 13:41:01 crc kubenswrapper[5039]: I1124 13:41:01.494709 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:41:01 crc kubenswrapper[5039]: I1124 13:41:01.498106 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb35c10f-5f1f-4175-9174-4696bada484a","Type":"ContainerStarted","Data":"cad996b1bb6a4808da9181a9f0062555a1541578283f9207c4b33b5add39ff39"} Nov 24 13:41:01 crc kubenswrapper[5039]: I1124 13:41:01.501841 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0c658680-b162-434c-9957-74c7c997f6d0","Type":"ContainerStarted","Data":"3cf409b02e0702d6d1555d15a767821b0f142ed8874b0319ca5e18f68e5e1ce9"} Nov 24 13:41:01 crc kubenswrapper[5039]: I1124 13:41:01.527857 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" podStartSLOduration=7.527839457 podStartE2EDuration="7.527839457s" podCreationTimestamp="2025-11-24 13:40:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:41:01.52186593 +0000 UTC m=+1373.960990430" watchObservedRunningTime="2025-11-24 13:41:01.527839457 +0000 UTC m=+1373.966963947" Nov 24 13:41:01 crc kubenswrapper[5039]: I1124 13:41:01.555932 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=6.431566009 podStartE2EDuration="7.555908599s" podCreationTimestamp="2025-11-24 13:40:54 +0000 UTC" firstStartedPulling="2025-11-24 13:40:56.441411849 +0000 UTC m=+1368.880536349" lastFinishedPulling="2025-11-24 13:40:57.565754449 +0000 UTC m=+1370.004878939" observedRunningTime="2025-11-24 13:41:01.54577158 +0000 UTC m=+1373.984896080" watchObservedRunningTime="2025-11-24 13:41:01.555908599 +0000 UTC m=+1373.995033099" Nov 24 13:41:02 crc kubenswrapper[5039]: I1124 13:41:02.513322 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb35c10f-5f1f-4175-9174-4696bada484a","Type":"ContainerStarted","Data":"034a273616dc694275fdcce5cfb64e6ed269c5c01da997861bc702be0bbaf3ad"} Nov 24 13:41:03 crc 
kubenswrapper[5039]: I1124 13:41:03.015953 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 24 13:41:03 crc kubenswrapper[5039]: I1124 13:41:03.016184 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="bcfabb1c-c5cb-493e-bf10-e4d2245804c4" containerName="cinder-api-log" containerID="cri-o://635a6b4b5a5fc1a2b6be311aa390c601e5944087fbaf82d16c92069216e0b0b5" gracePeriod=30 Nov 24 13:41:03 crc kubenswrapper[5039]: I1124 13:41:03.016271 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="bcfabb1c-c5cb-493e-bf10-e4d2245804c4" containerName="cinder-api" containerID="cri-o://22529c43a0fc26d4147908227de33458db183d0e30bea0d8de09f4636c1f8516" gracePeriod=30 Nov 24 13:41:03 crc kubenswrapper[5039]: I1124 13:41:03.035033 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="bcfabb1c-c5cb-493e-bf10-e4d2245804c4" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.188:8776/healthcheck\": EOF" Nov 24 13:41:03 crc kubenswrapper[5039]: I1124 13:41:03.507338 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:41:03 crc kubenswrapper[5039]: I1124 13:41:03.543760 5039 generic.go:334] "Generic (PLEG): container finished" podID="bcfabb1c-c5cb-493e-bf10-e4d2245804c4" containerID="635a6b4b5a5fc1a2b6be311aa390c601e5944087fbaf82d16c92069216e0b0b5" exitCode=143 Nov 24 13:41:03 crc kubenswrapper[5039]: I1124 13:41:03.543820 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bcfabb1c-c5cb-493e-bf10-e4d2245804c4","Type":"ContainerDied","Data":"635a6b4b5a5fc1a2b6be311aa390c601e5944087fbaf82d16c92069216e0b0b5"} Nov 24 13:41:03 crc kubenswrapper[5039]: I1124 13:41:03.564145 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.555541 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb35c10f-5f1f-4175-9174-4696bada484a","Type":"ContainerStarted","Data":"bfc41be41545f659adb793d89f7a38a9701c37ae9cf9302837c485713e46778b"} Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.555868 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.592172 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.644215239 podStartE2EDuration="8.592147024s" podCreationTimestamp="2025-11-24 13:40:56 +0000 UTC" firstStartedPulling="2025-11-24 13:40:57.722001513 +0000 UTC m=+1370.161126013" lastFinishedPulling="2025-11-24 13:41:03.669933288 +0000 UTC m=+1376.109057798" observedRunningTime="2025-11-24 13:41:04.586263398 +0000 UTC m=+1377.025387898" watchObservedRunningTime="2025-11-24 13:41:04.592147024 +0000 UTC m=+1377.031271524" Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.761099 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.763896 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7bdd5bd5df-sqgnq"] Nov 24 13:41:04 crc kubenswrapper[5039]: E1124 13:41:04.764796 5039 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="0082e16b-213f-4995-b96c-da1c3634191e" containerName="init" Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.764822 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0082e16b-213f-4995-b96c-da1c3634191e" containerName="init" Nov 24 13:41:04 crc kubenswrapper[5039]: E1124 13:41:04.764836 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="626a5ed2-5dc0-47f1-af4c-f4f21adfda35" containerName="init" Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.764844 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="626a5ed2-5dc0-47f1-af4c-f4f21adfda35" containerName="init" Nov 24 13:41:04 crc kubenswrapper[5039]: E1124 13:41:04.764861 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="626a5ed2-5dc0-47f1-af4c-f4f21adfda35" containerName="dnsmasq-dns" Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.764870 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="626a5ed2-5dc0-47f1-af4c-f4f21adfda35" containerName="dnsmasq-dns" Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.765165 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="626a5ed2-5dc0-47f1-af4c-f4f21adfda35" containerName="dnsmasq-dns" Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.765192 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0082e16b-213f-4995-b96c-da1c3634191e" containerName="init" Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.766480 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.771661 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.775947 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.782790 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7bdd5bd5df-sqgnq"] Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.899602 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2zrr\" (UniqueName: \"kubernetes.io/projected/05e189da-8176-4c22-9069-51d7e5f8b867-kube-api-access-t2zrr\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.899664 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/05e189da-8176-4c22-9069-51d7e5f8b867-config\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.899699 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05e189da-8176-4c22-9069-51d7e5f8b867-combined-ca-bundle\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.899764 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/05e189da-8176-4c22-9069-51d7e5f8b867-internal-tls-certs\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.899824 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/05e189da-8176-4c22-9069-51d7e5f8b867-public-tls-certs\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.899859 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/05e189da-8176-4c22-9069-51d7e5f8b867-ovndb-tls-certs\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:04 crc kubenswrapper[5039]: I1124 13:41:04.899873 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/05e189da-8176-4c22-9069-51d7e5f8b867-httpd-config\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:05 crc kubenswrapper[5039]: I1124 13:41:05.001890 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2zrr\" (UniqueName: \"kubernetes.io/projected/05e189da-8176-4c22-9069-51d7e5f8b867-kube-api-access-t2zrr\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:05 crc kubenswrapper[5039]: I1124 13:41:05.002325 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/05e189da-8176-4c22-9069-51d7e5f8b867-config\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:05 crc kubenswrapper[5039]: I1124 13:41:05.003413 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05e189da-8176-4c22-9069-51d7e5f8b867-combined-ca-bundle\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:05 crc kubenswrapper[5039]: I1124 13:41:05.003488 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/05e189da-8176-4c22-9069-51d7e5f8b867-internal-tls-certs\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:05 crc kubenswrapper[5039]: I1124 13:41:05.003593 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/05e189da-8176-4c22-9069-51d7e5f8b867-public-tls-certs\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:05 crc kubenswrapper[5039]: I1124 13:41:05.003652 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/05e189da-8176-4c22-9069-51d7e5f8b867-ovndb-tls-certs\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:05 crc kubenswrapper[5039]: I1124 13:41:05.003674 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/05e189da-8176-4c22-9069-51d7e5f8b867-httpd-config\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:05 crc kubenswrapper[5039]: I1124 13:41:05.010526 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/05e189da-8176-4c22-9069-51d7e5f8b867-internal-tls-certs\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:05 crc kubenswrapper[5039]: I1124 13:41:05.010666 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/05e189da-8176-4c22-9069-51d7e5f8b867-ovndb-tls-certs\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:05 crc kubenswrapper[5039]: I1124 13:41:05.012214 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/05e189da-8176-4c22-9069-51d7e5f8b867-httpd-config\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:05 crc kubenswrapper[5039]: I1124 13:41:05.012622 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/05e189da-8176-4c22-9069-51d7e5f8b867-public-tls-certs\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:05 crc kubenswrapper[5039]: I1124 13:41:05.012911 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/05e189da-8176-4c22-9069-51d7e5f8b867-config\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:05 crc kubenswrapper[5039]: I1124 13:41:05.015332 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05e189da-8176-4c22-9069-51d7e5f8b867-combined-ca-bundle\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:05 crc kubenswrapper[5039]: I1124 13:41:05.029235 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 24 13:41:05 crc kubenswrapper[5039]: I1124 13:41:05.030254 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2zrr\" (UniqueName: \"kubernetes.io/projected/05e189da-8176-4c22-9069-51d7e5f8b867-kube-api-access-t2zrr\") pod \"neutron-7bdd5bd5df-sqgnq\" (UID: \"05e189da-8176-4c22-9069-51d7e5f8b867\") " pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:05 crc kubenswrapper[5039]: I1124 13:41:05.089461 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:05 crc kubenswrapper[5039]: I1124 13:41:05.603102 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 13:41:05 crc kubenswrapper[5039]: I1124 13:41:05.776343 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7bdd5bd5df-sqgnq"] Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.139439 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7c65477b5b-lzp7p"] Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.141514 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.144421 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.144816 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.160970 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7c65477b5b-lzp7p"] Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.234647 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/910059fe-375d-443a-8dce-3dd9d0ea7bce-config-data-custom\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.234685 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/910059fe-375d-443a-8dce-3dd9d0ea7bce-logs\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.234756 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/910059fe-375d-443a-8dce-3dd9d0ea7bce-combined-ca-bundle\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.234831 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppxvd\" (UniqueName: \"kubernetes.io/projected/910059fe-375d-443a-8dce-3dd9d0ea7bce-kube-api-access-ppxvd\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.234887 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/910059fe-375d-443a-8dce-3dd9d0ea7bce-internal-tls-certs\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.234915 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/910059fe-375d-443a-8dce-3dd9d0ea7bce-public-tls-certs\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.234998 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/910059fe-375d-443a-8dce-3dd9d0ea7bce-config-data\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.336512 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/910059fe-375d-443a-8dce-3dd9d0ea7bce-config-data-custom\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.336590 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/910059fe-375d-443a-8dce-3dd9d0ea7bce-logs\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.337059 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/910059fe-375d-443a-8dce-3dd9d0ea7bce-logs\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.337101 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/910059fe-375d-443a-8dce-3dd9d0ea7bce-combined-ca-bundle\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.337159 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppxvd\" (UniqueName: \"kubernetes.io/projected/910059fe-375d-443a-8dce-3dd9d0ea7bce-kube-api-access-ppxvd\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.337184 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/910059fe-375d-443a-8dce-3dd9d0ea7bce-internal-tls-certs\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.337447 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/910059fe-375d-443a-8dce-3dd9d0ea7bce-public-tls-certs\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.337763 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/910059fe-375d-443a-8dce-3dd9d0ea7bce-config-data\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.341592 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/910059fe-375d-443a-8dce-3dd9d0ea7bce-config-data-custom\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.341764 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/910059fe-375d-443a-8dce-3dd9d0ea7bce-combined-ca-bundle\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.342355 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/910059fe-375d-443a-8dce-3dd9d0ea7bce-config-data\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.351064 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/910059fe-375d-443a-8dce-3dd9d0ea7bce-internal-tls-certs\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.360463 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/910059fe-375d-443a-8dce-3dd9d0ea7bce-public-tls-certs\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.373166 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppxvd\" (UniqueName: \"kubernetes.io/projected/910059fe-375d-443a-8dce-3dd9d0ea7bce-kube-api-access-ppxvd\") pod \"barbican-api-7c65477b5b-lzp7p\" (UID: \"910059fe-375d-443a-8dce-3dd9d0ea7bce\") " pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.459996 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.578648 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7bdd5bd5df-sqgnq" event={"ID":"05e189da-8176-4c22-9069-51d7e5f8b867","Type":"ContainerStarted","Data":"4ae5338fde705ee639221fb16c4e5afb380eb53f32f4cb7f55e55a53cb653945"} Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.578700 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7bdd5bd5df-sqgnq" event={"ID":"05e189da-8176-4c22-9069-51d7e5f8b867","Type":"ContainerStarted","Data":"299c3e574c929479b4ecd2cc5362e4087994697421cc83416a465185474b1f6d"} Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.578814 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="0c658680-b162-434c-9957-74c7c997f6d0" containerName="cinder-scheduler" containerID="cri-o://ed41ca9709a824f17ac7b9d9e394ae840b7a1126682caf22b4b8297118fed767" gracePeriod=30 Nov 24 13:41:06 crc kubenswrapper[5039]: I1124 13:41:06.579328 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="0c658680-b162-434c-9957-74c7c997f6d0" containerName="probe" containerID="cri-o://3cf409b02e0702d6d1555d15a767821b0f142ed8874b0319ca5e18f68e5e1ce9" gracePeriod=30 Nov 24 13:41:07 crc kubenswrapper[5039]: I1124 13:41:07.029985 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7c65477b5b-lzp7p"] Nov 24 13:41:07 crc kubenswrapper[5039]: W1124 13:41:07.032750 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod910059fe_375d_443a_8dce_3dd9d0ea7bce.slice/crio-d617abfa32d1835de2831494f8fcbad80a1732cb57d126aca74ebb3dfda7fbf0 WatchSource:0}: Error finding container d617abfa32d1835de2831494f8fcbad80a1732cb57d126aca74ebb3dfda7fbf0: Status 404 returned error can't find the container with id d617abfa32d1835de2831494f8fcbad80a1732cb57d126aca74ebb3dfda7fbf0 Nov 24 13:41:07 crc kubenswrapper[5039]: I1124 13:41:07.596154 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7c65477b5b-lzp7p" event={"ID":"910059fe-375d-443a-8dce-3dd9d0ea7bce","Type":"ContainerStarted","Data":"15420e0b752d917d0d3979dddf9441becacc70f620fffb425c0d38f824acb5f8"} Nov 24 13:41:07 crc kubenswrapper[5039]: I1124 13:41:07.596474 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7c65477b5b-lzp7p" event={"ID":"910059fe-375d-443a-8dce-3dd9d0ea7bce","Type":"ContainerStarted","Data":"7d9cfd00ecd2201b7dbfbea3a21dd4c56ee192ef852bb50d94add8bae8e14977"} Nov 24 13:41:07 crc kubenswrapper[5039]: I1124 13:41:07.596484 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7c65477b5b-lzp7p" event={"ID":"910059fe-375d-443a-8dce-3dd9d0ea7bce","Type":"ContainerStarted","Data":"d617abfa32d1835de2831494f8fcbad80a1732cb57d126aca74ebb3dfda7fbf0"} Nov 24 13:41:07 crc kubenswrapper[5039]: I1124 13:41:07.597033 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:07 crc kubenswrapper[5039]: I1124 13:41:07.597081 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:07 crc kubenswrapper[5039]: I1124 13:41:07.615223 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/neutron-7bdd5bd5df-sqgnq" event={"ID":"05e189da-8176-4c22-9069-51d7e5f8b867","Type":"ContainerStarted","Data":"2cc2cd6eafd5b72d1fee5733f4da79ff959eda70b2247874f685669a6858c30f"} Nov 24 13:41:07 crc kubenswrapper[5039]: I1124 13:41:07.615700 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7bdd5bd5df-sqgnq" Nov 24 13:41:07 crc kubenswrapper[5039]: I1124 13:41:07.618398 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7c65477b5b-lzp7p" podStartSLOduration=1.6183786 podStartE2EDuration="1.6183786s" podCreationTimestamp="2025-11-24 13:41:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:41:07.615073529 +0000 UTC m=+1380.054198019" watchObservedRunningTime="2025-11-24 13:41:07.6183786 +0000 UTC m=+1380.057503100" Nov 24 13:41:07 crc kubenswrapper[5039]: I1124 13:41:07.619294 5039 generic.go:334] "Generic (PLEG): container finished" podID="0c658680-b162-434c-9957-74c7c997f6d0" containerID="ed41ca9709a824f17ac7b9d9e394ae840b7a1126682caf22b4b8297118fed767" exitCode=0 Nov 24 13:41:07 crc kubenswrapper[5039]: I1124 13:41:07.619330 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0c658680-b162-434c-9957-74c7c997f6d0","Type":"ContainerDied","Data":"ed41ca9709a824f17ac7b9d9e394ae840b7a1126682caf22b4b8297118fed767"} Nov 24 13:41:07 crc kubenswrapper[5039]: I1124 13:41:07.647011 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7bdd5bd5df-sqgnq" podStartSLOduration=3.646992745 podStartE2EDuration="3.646992745s" podCreationTimestamp="2025-11-24 13:41:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:41:07.639772037 +0000 UTC m=+1380.078896537" watchObservedRunningTime="2025-11-24 13:41:07.646992745 +0000 UTC m=+1380.086117235" Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.478635 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.608561 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-config-data\") pod \"0c658680-b162-434c-9957-74c7c997f6d0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.609000 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-combined-ca-bundle\") pod \"0c658680-b162-434c-9957-74c7c997f6d0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.609156 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qlkzb\" (UniqueName: \"kubernetes.io/projected/0c658680-b162-434c-9957-74c7c997f6d0-kube-api-access-qlkzb\") pod \"0c658680-b162-434c-9957-74c7c997f6d0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.609230 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-config-data-custom\") pod \"0c658680-b162-434c-9957-74c7c997f6d0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.609321 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-scripts\") pod \"0c658680-b162-434c-9957-74c7c997f6d0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.609394 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0c658680-b162-434c-9957-74c7c997f6d0-etc-machine-id\") pod \"0c658680-b162-434c-9957-74c7c997f6d0\" (UID: \"0c658680-b162-434c-9957-74c7c997f6d0\") " Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.611353 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0c658680-b162-434c-9957-74c7c997f6d0-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "0c658680-b162-434c-9957-74c7c997f6d0" (UID: "0c658680-b162-434c-9957-74c7c997f6d0"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.619444 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0c658680-b162-434c-9957-74c7c997f6d0" (UID: "0c658680-b162-434c-9957-74c7c997f6d0"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.628176 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c658680-b162-434c-9957-74c7c997f6d0-kube-api-access-qlkzb" (OuterVolumeSpecName: "kube-api-access-qlkzb") pod "0c658680-b162-434c-9957-74c7c997f6d0" (UID: "0c658680-b162-434c-9957-74c7c997f6d0"). InnerVolumeSpecName "kube-api-access-qlkzb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.628371 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-scripts" (OuterVolumeSpecName: "scripts") pod "0c658680-b162-434c-9957-74c7c997f6d0" (UID: "0c658680-b162-434c-9957-74c7c997f6d0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.666882 5039 generic.go:334] "Generic (PLEG): container finished" podID="0c658680-b162-434c-9957-74c7c997f6d0" containerID="3cf409b02e0702d6d1555d15a767821b0f142ed8874b0319ca5e18f68e5e1ce9" exitCode=0 Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.667173 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0c658680-b162-434c-9957-74c7c997f6d0","Type":"ContainerDied","Data":"3cf409b02e0702d6d1555d15a767821b0f142ed8874b0319ca5e18f68e5e1ce9"} Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.667217 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0c658680-b162-434c-9957-74c7c997f6d0","Type":"ContainerDied","Data":"6b8fd292c2e1cde7b536c33e354f458779c27c8a87e40f4c6630c2d87d78a1e2"} Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.667239 5039 scope.go:117] "RemoveContainer" containerID="3cf409b02e0702d6d1555d15a767821b0f142ed8874b0319ca5e18f68e5e1ce9" Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.669311 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.709377 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0c658680-b162-434c-9957-74c7c997f6d0" (UID: "0c658680-b162-434c-9957-74c7c997f6d0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.713843 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.713882 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qlkzb\" (UniqueName: \"kubernetes.io/projected/0c658680-b162-434c-9957-74c7c997f6d0-kube-api-access-qlkzb\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.713894 5039 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.713903 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.713911 5039 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0c658680-b162-434c-9957-74c7c997f6d0-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.752326 5039 scope.go:117] "RemoveContainer" containerID="ed41ca9709a824f17ac7b9d9e394ae840b7a1126682caf22b4b8297118fed767" Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.764882 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-config-data" (OuterVolumeSpecName: "config-data") pod "0c658680-b162-434c-9957-74c7c997f6d0" (UID: "0c658680-b162-434c-9957-74c7c997f6d0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.776258 5039 scope.go:117] "RemoveContainer" containerID="3cf409b02e0702d6d1555d15a767821b0f142ed8874b0319ca5e18f68e5e1ce9" Nov 24 13:41:08 crc kubenswrapper[5039]: E1124 13:41:08.777078 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3cf409b02e0702d6d1555d15a767821b0f142ed8874b0319ca5e18f68e5e1ce9\": container with ID starting with 3cf409b02e0702d6d1555d15a767821b0f142ed8874b0319ca5e18f68e5e1ce9 not found: ID does not exist" containerID="3cf409b02e0702d6d1555d15a767821b0f142ed8874b0319ca5e18f68e5e1ce9" Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.777116 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3cf409b02e0702d6d1555d15a767821b0f142ed8874b0319ca5e18f68e5e1ce9"} err="failed to get container status \"3cf409b02e0702d6d1555d15a767821b0f142ed8874b0319ca5e18f68e5e1ce9\": rpc error: code = NotFound desc = could not find container \"3cf409b02e0702d6d1555d15a767821b0f142ed8874b0319ca5e18f68e5e1ce9\": container with ID starting with 3cf409b02e0702d6d1555d15a767821b0f142ed8874b0319ca5e18f68e5e1ce9 not found: ID does not exist" Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.777141 5039 scope.go:117] "RemoveContainer" containerID="ed41ca9709a824f17ac7b9d9e394ae840b7a1126682caf22b4b8297118fed767" Nov 24 13:41:08 crc kubenswrapper[5039]: E1124 13:41:08.777393 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed41ca9709a824f17ac7b9d9e394ae840b7a1126682caf22b4b8297118fed767\": container with ID starting with ed41ca9709a824f17ac7b9d9e394ae840b7a1126682caf22b4b8297118fed767 not found: ID does not exist" containerID="ed41ca9709a824f17ac7b9d9e394ae840b7a1126682caf22b4b8297118fed767" Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.777418 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed41ca9709a824f17ac7b9d9e394ae840b7a1126682caf22b4b8297118fed767"} err="failed to get container status \"ed41ca9709a824f17ac7b9d9e394ae840b7a1126682caf22b4b8297118fed767\": rpc error: code = NotFound desc = could not find container \"ed41ca9709a824f17ac7b9d9e394ae840b7a1126682caf22b4b8297118fed767\": container with ID starting with ed41ca9709a824f17ac7b9d9e394ae840b7a1126682caf22b4b8297118fed767 not found: ID does not exist" Nov 24 13:41:08 crc kubenswrapper[5039]: I1124 13:41:08.815963 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c658680-b162-434c-9957-74c7c997f6d0-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.007176 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.017645 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.041676 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 13:41:09 crc kubenswrapper[5039]: E1124 13:41:09.042336 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c658680-b162-434c-9957-74c7c997f6d0" containerName="cinder-scheduler" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.042433 5039 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="0c658680-b162-434c-9957-74c7c997f6d0" containerName="cinder-scheduler" Nov 24 13:41:09 crc kubenswrapper[5039]: E1124 13:41:09.042536 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c658680-b162-434c-9957-74c7c997f6d0" containerName="probe" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.042591 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c658680-b162-434c-9957-74c7c997f6d0" containerName="probe" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.042883 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c658680-b162-434c-9957-74c7c997f6d0" containerName="cinder-scheduler" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.042978 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c658680-b162-434c-9957-74c7c997f6d0" containerName="probe" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.044219 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.046789 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.054243 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.222108 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d1395fb6-6223-4aea-9a6d-e743cecd804e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d1395fb6-6223-4aea-9a6d-e743cecd804e\") " pod="openstack/cinder-scheduler-0" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.222168 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1395fb6-6223-4aea-9a6d-e743cecd804e-scripts\") pod \"cinder-scheduler-0\" (UID: \"d1395fb6-6223-4aea-9a6d-e743cecd804e\") " pod="openstack/cinder-scheduler-0" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.222223 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d1395fb6-6223-4aea-9a6d-e743cecd804e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d1395fb6-6223-4aea-9a6d-e743cecd804e\") " pod="openstack/cinder-scheduler-0" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.222272 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1395fb6-6223-4aea-9a6d-e743cecd804e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d1395fb6-6223-4aea-9a6d-e743cecd804e\") " pod="openstack/cinder-scheduler-0" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.222297 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ndnr\" (UniqueName: \"kubernetes.io/projected/d1395fb6-6223-4aea-9a6d-e743cecd804e-kube-api-access-7ndnr\") pod \"cinder-scheduler-0\" (UID: \"d1395fb6-6223-4aea-9a6d-e743cecd804e\") " pod="openstack/cinder-scheduler-0" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.222401 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/d1395fb6-6223-4aea-9a6d-e743cecd804e-config-data\") pod \"cinder-scheduler-0\" (UID: \"d1395fb6-6223-4aea-9a6d-e743cecd804e\") " pod="openstack/cinder-scheduler-0" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.323952 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1395fb6-6223-4aea-9a6d-e743cecd804e-scripts\") pod \"cinder-scheduler-0\" (UID: \"d1395fb6-6223-4aea-9a6d-e743cecd804e\") " pod="openstack/cinder-scheduler-0" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.324209 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d1395fb6-6223-4aea-9a6d-e743cecd804e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d1395fb6-6223-4aea-9a6d-e743cecd804e\") " pod="openstack/cinder-scheduler-0" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.324337 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1395fb6-6223-4aea-9a6d-e743cecd804e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d1395fb6-6223-4aea-9a6d-e743cecd804e\") " pod="openstack/cinder-scheduler-0" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.324419 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ndnr\" (UniqueName: \"kubernetes.io/projected/d1395fb6-6223-4aea-9a6d-e743cecd804e-kube-api-access-7ndnr\") pod \"cinder-scheduler-0\" (UID: \"d1395fb6-6223-4aea-9a6d-e743cecd804e\") " pod="openstack/cinder-scheduler-0" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.324577 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1395fb6-6223-4aea-9a6d-e743cecd804e-config-data\") pod \"cinder-scheduler-0\" (UID: \"d1395fb6-6223-4aea-9a6d-e743cecd804e\") " pod="openstack/cinder-scheduler-0" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.324735 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d1395fb6-6223-4aea-9a6d-e743cecd804e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d1395fb6-6223-4aea-9a6d-e743cecd804e\") " pod="openstack/cinder-scheduler-0" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.324805 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d1395fb6-6223-4aea-9a6d-e743cecd804e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d1395fb6-6223-4aea-9a6d-e743cecd804e\") " pod="openstack/cinder-scheduler-0" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.329138 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d1395fb6-6223-4aea-9a6d-e743cecd804e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d1395fb6-6223-4aea-9a6d-e743cecd804e\") " pod="openstack/cinder-scheduler-0" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.329462 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1395fb6-6223-4aea-9a6d-e743cecd804e-config-data\") pod \"cinder-scheduler-0\" (UID: \"d1395fb6-6223-4aea-9a6d-e743cecd804e\") " pod="openstack/cinder-scheduler-0" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.334527 5039 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1395fb6-6223-4aea-9a6d-e743cecd804e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d1395fb6-6223-4aea-9a6d-e743cecd804e\") " pod="openstack/cinder-scheduler-0" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.340551 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1395fb6-6223-4aea-9a6d-e743cecd804e-scripts\") pod \"cinder-scheduler-0\" (UID: \"d1395fb6-6223-4aea-9a6d-e743cecd804e\") " pod="openstack/cinder-scheduler-0" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.341308 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ndnr\" (UniqueName: \"kubernetes.io/projected/d1395fb6-6223-4aea-9a6d-e743cecd804e-kube-api-access-7ndnr\") pod \"cinder-scheduler-0\" (UID: \"d1395fb6-6223-4aea-9a6d-e743cecd804e\") " pod="openstack/cinder-scheduler-0" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.406216 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 24 13:41:09 crc kubenswrapper[5039]: I1124 13:41:09.869629 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 13:41:10 crc kubenswrapper[5039]: I1124 13:41:10.133883 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:41:10 crc kubenswrapper[5039]: I1124 13:41:10.161802 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="bcfabb1c-c5cb-493e-bf10-e4d2245804c4" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.188:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 24 13:41:10 crc kubenswrapper[5039]: I1124 13:41:10.207484 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-sw4mc"] Nov 24 13:41:10 crc kubenswrapper[5039]: I1124 13:41:10.207761 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" podUID="9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21" containerName="dnsmasq-dns" containerID="cri-o://e3785e5f236852810dbb5f0ccced6212d751b5f6809673aa541679fde30ab67c" gracePeriod=10 Nov 24 13:41:10 crc kubenswrapper[5039]: I1124 13:41:10.374052 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c658680-b162-434c-9957-74c7c997f6d0" path="/var/lib/kubelet/pods/0c658680-b162-434c-9957-74c7c997f6d0/volumes" Nov 24 13:41:10 crc kubenswrapper[5039]: I1124 13:41:10.437477 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="bcfabb1c-c5cb-493e-bf10-e4d2245804c4" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.188:8776/healthcheck\": read tcp 10.217.0.2:48782->10.217.0.188:8776: read: connection reset by peer" Nov 24 13:41:10 crc kubenswrapper[5039]: I1124 13:41:10.732714 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d1395fb6-6223-4aea-9a6d-e743cecd804e","Type":"ContainerStarted","Data":"01b17bd96fe5b309a2fe71fd462bc457b385c5a8616d9fbb2939363bdf0eda63"} Nov 24 13:41:10 crc kubenswrapper[5039]: I1124 13:41:10.776845 5039 generic.go:334] "Generic (PLEG): container finished" podID="bcfabb1c-c5cb-493e-bf10-e4d2245804c4" 
containerID="22529c43a0fc26d4147908227de33458db183d0e30bea0d8de09f4636c1f8516" exitCode=0 Nov 24 13:41:10 crc kubenswrapper[5039]: I1124 13:41:10.776954 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bcfabb1c-c5cb-493e-bf10-e4d2245804c4","Type":"ContainerDied","Data":"22529c43a0fc26d4147908227de33458db183d0e30bea0d8de09f4636c1f8516"} Nov 24 13:41:10 crc kubenswrapper[5039]: I1124 13:41:10.819886 5039 generic.go:334] "Generic (PLEG): container finished" podID="9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21" containerID="e3785e5f236852810dbb5f0ccced6212d751b5f6809673aa541679fde30ab67c" exitCode=0 Nov 24 13:41:10 crc kubenswrapper[5039]: I1124 13:41:10.819959 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" event={"ID":"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21","Type":"ContainerDied","Data":"e3785e5f236852810dbb5f0ccced6212d751b5f6809673aa541679fde30ab67c"} Nov 24 13:41:10 crc kubenswrapper[5039]: I1124 13:41:10.975920 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.069824 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-config\") pod \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.069973 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-dns-svc\") pod \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.070033 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-ovsdbserver-sb\") pod \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.070098 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-dns-swift-storage-0\") pod \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.070166 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-ovsdbserver-nb\") pod \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.070187 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2wwgg\" (UniqueName: \"kubernetes.io/projected/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-kube-api-access-2wwgg\") pod \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\" (UID: \"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21\") " Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.089607 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-kube-api-access-2wwgg" (OuterVolumeSpecName: "kube-api-access-2wwgg") pod 
"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21" (UID: "9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21"). InnerVolumeSpecName "kube-api-access-2wwgg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.164897 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21" (UID: "9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.179431 5039 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.179474 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2wwgg\" (UniqueName: \"kubernetes.io/projected/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-kube-api-access-2wwgg\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.223389 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21" (UID: "9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.232396 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21" (UID: "9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.232907 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-config" (OuterVolumeSpecName: "config") pod "9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21" (UID: "9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.259288 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21" (UID: "9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.282308 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.282340 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.282352 5039 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.282361 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.345675 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.490049 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-config-data-custom\") pod \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.490382 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-combined-ca-bundle\") pod \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.490495 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-etc-machine-id\") pod \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.490637 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-scripts\") pod \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.490751 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-logs\") pod \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.490958 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nxrv7\" (UniqueName: \"kubernetes.io/projected/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-kube-api-access-nxrv7\") pod \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.491034 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-config-data\") pod \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\" (UID: \"bcfabb1c-c5cb-493e-bf10-e4d2245804c4\") " Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.491912 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "bcfabb1c-c5cb-493e-bf10-e4d2245804c4" (UID: "bcfabb1c-c5cb-493e-bf10-e4d2245804c4"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.492793 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-logs" (OuterVolumeSpecName: "logs") pod "bcfabb1c-c5cb-493e-bf10-e4d2245804c4" (UID: "bcfabb1c-c5cb-493e-bf10-e4d2245804c4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.494893 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "bcfabb1c-c5cb-493e-bf10-e4d2245804c4" (UID: "bcfabb1c-c5cb-493e-bf10-e4d2245804c4"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.499711 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-scripts" (OuterVolumeSpecName: "scripts") pod "bcfabb1c-c5cb-493e-bf10-e4d2245804c4" (UID: "bcfabb1c-c5cb-493e-bf10-e4d2245804c4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.509652 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-kube-api-access-nxrv7" (OuterVolumeSpecName: "kube-api-access-nxrv7") pod "bcfabb1c-c5cb-493e-bf10-e4d2245804c4" (UID: "bcfabb1c-c5cb-493e-bf10-e4d2245804c4"). InnerVolumeSpecName "kube-api-access-nxrv7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.536707 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bcfabb1c-c5cb-493e-bf10-e4d2245804c4" (UID: "bcfabb1c-c5cb-493e-bf10-e4d2245804c4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.563726 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-config-data" (OuterVolumeSpecName: "config-data") pod "bcfabb1c-c5cb-493e-bf10-e4d2245804c4" (UID: "bcfabb1c-c5cb-493e-bf10-e4d2245804c4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.593866 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nxrv7\" (UniqueName: \"kubernetes.io/projected/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-kube-api-access-nxrv7\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.593905 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.593916 5039 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.593925 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.593936 5039 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.593944 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.593953 5039 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bcfabb1c-c5cb-493e-bf10-e4d2245804c4-logs\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.830868 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" event={"ID":"9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21","Type":"ContainerDied","Data":"65570993434dab4b93a2c64ec323510d083e3dd499c03eb26db56180755c7c64"} Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.831160 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-76fcf4b695-sw4mc" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.831190 5039 scope.go:117] "RemoveContainer" containerID="e3785e5f236852810dbb5f0ccced6212d751b5f6809673aa541679fde30ab67c" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.834596 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d1395fb6-6223-4aea-9a6d-e743cecd804e","Type":"ContainerStarted","Data":"fec54d24fdeec3fe8d72abe398a13efa21680e94f4a1c46706681addbd4b4513"} Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.834640 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d1395fb6-6223-4aea-9a6d-e743cecd804e","Type":"ContainerStarted","Data":"446f3ecd7d78dd50e1c76c77e38f85394bcda3488ebbb76dca94b69c1aa87261"} Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.838897 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bcfabb1c-c5cb-493e-bf10-e4d2245804c4","Type":"ContainerDied","Data":"21c1ad017c85fd158d4dac75cbbf6b85558fd68e315b13670eb762d967bf11ff"} Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.838995 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.861272 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=2.861252813 podStartE2EDuration="2.861252813s" podCreationTimestamp="2025-11-24 13:41:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:41:11.854439115 +0000 UTC m=+1384.293563625" watchObservedRunningTime="2025-11-24 13:41:11.861252813 +0000 UTC m=+1384.300377313" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.874909 5039 scope.go:117] "RemoveContainer" containerID="da9544984bc7bed87b2addb4937d3b07e5a7e66bad28910ae0e06cc210d24320" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.887627 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-sw4mc"] Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.946078 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-sw4mc"] Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.947931 5039 scope.go:117] "RemoveContainer" containerID="22529c43a0fc26d4147908227de33458db183d0e30bea0d8de09f4636c1f8516" Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.969733 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.987570 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 24 13:41:11 crc kubenswrapper[5039]: I1124 13:41:11.998701 5039 scope.go:117] "RemoveContainer" containerID="635a6b4b5a5fc1a2b6be311aa390c601e5944087fbaf82d16c92069216e0b0b5" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.012812 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 24 13:41:12 crc kubenswrapper[5039]: E1124 13:41:12.013484 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcfabb1c-c5cb-493e-bf10-e4d2245804c4" containerName="cinder-api" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.013532 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcfabb1c-c5cb-493e-bf10-e4d2245804c4" 
containerName="cinder-api" Nov 24 13:41:12 crc kubenswrapper[5039]: E1124 13:41:12.013549 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcfabb1c-c5cb-493e-bf10-e4d2245804c4" containerName="cinder-api-log" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.013558 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcfabb1c-c5cb-493e-bf10-e4d2245804c4" containerName="cinder-api-log" Nov 24 13:41:12 crc kubenswrapper[5039]: E1124 13:41:12.013629 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21" containerName="init" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.013639 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21" containerName="init" Nov 24 13:41:12 crc kubenswrapper[5039]: E1124 13:41:12.013654 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21" containerName="dnsmasq-dns" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.013661 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21" containerName="dnsmasq-dns" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.014025 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcfabb1c-c5cb-493e-bf10-e4d2245804c4" containerName="cinder-api-log" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.014055 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21" containerName="dnsmasq-dns" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.014074 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcfabb1c-c5cb-493e-bf10-e4d2245804c4" containerName="cinder-api" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.015576 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.019080 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.019108 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.019306 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.023809 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.109849 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1847d7c-086b-4615-81d8-a6c5e915dcb4-logs\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.109975 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1847d7c-086b-4615-81d8-a6c5e915dcb4-public-tls-certs\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.110142 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1847d7c-086b-4615-81d8-a6c5e915dcb4-config-data\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.110201 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1847d7c-086b-4615-81d8-a6c5e915dcb4-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.110260 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1847d7c-086b-4615-81d8-a6c5e915dcb4-scripts\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.110294 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tg59\" (UniqueName: \"kubernetes.io/projected/d1847d7c-086b-4615-81d8-a6c5e915dcb4-kube-api-access-6tg59\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.110328 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d1847d7c-086b-4615-81d8-a6c5e915dcb4-config-data-custom\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.110442 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/d1847d7c-086b-4615-81d8-a6c5e915dcb4-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.110603 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d1847d7c-086b-4615-81d8-a6c5e915dcb4-etc-machine-id\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.212029 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1847d7c-086b-4615-81d8-a6c5e915dcb4-config-data\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.212096 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1847d7c-086b-4615-81d8-a6c5e915dcb4-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.212130 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1847d7c-086b-4615-81d8-a6c5e915dcb4-scripts\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.212148 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tg59\" (UniqueName: \"kubernetes.io/projected/d1847d7c-086b-4615-81d8-a6c5e915dcb4-kube-api-access-6tg59\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.212175 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d1847d7c-086b-4615-81d8-a6c5e915dcb4-config-data-custom\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.212204 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1847d7c-086b-4615-81d8-a6c5e915dcb4-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.212262 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d1847d7c-086b-4615-81d8-a6c5e915dcb4-etc-machine-id\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.212309 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1847d7c-086b-4615-81d8-a6c5e915dcb4-logs\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.212347 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1847d7c-086b-4615-81d8-a6c5e915dcb4-public-tls-certs\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.212390 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d1847d7c-086b-4615-81d8-a6c5e915dcb4-etc-machine-id\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.212791 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1847d7c-086b-4615-81d8-a6c5e915dcb4-logs\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.218189 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1847d7c-086b-4615-81d8-a6c5e915dcb4-scripts\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.219114 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d1847d7c-086b-4615-81d8-a6c5e915dcb4-config-data-custom\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.219905 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1847d7c-086b-4615-81d8-a6c5e915dcb4-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.221864 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1847d7c-086b-4615-81d8-a6c5e915dcb4-public-tls-certs\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.230329 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1847d7c-086b-4615-81d8-a6c5e915dcb4-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.231720 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tg59\" (UniqueName: \"kubernetes.io/projected/d1847d7c-086b-4615-81d8-a6c5e915dcb4-kube-api-access-6tg59\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.233357 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1847d7c-086b-4615-81d8-a6c5e915dcb4-config-data\") pod \"cinder-api-0\" (UID: \"d1847d7c-086b-4615-81d8-a6c5e915dcb4\") " pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.325598 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21" 
path="/var/lib/kubelet/pods/9b25bdbd-eb8e-4ef9-9697-6ecbdd09bc21/volumes" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.326744 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bcfabb1c-c5cb-493e-bf10-e4d2245804c4" path="/var/lib/kubelet/pods/bcfabb1c-c5cb-493e-bf10-e4d2245804c4/volumes" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.404447 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.772690 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.795316 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-66cb4657dd-z97bx" Nov 24 13:41:12 crc kubenswrapper[5039]: I1124 13:41:12.981302 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 24 13:41:13 crc kubenswrapper[5039]: I1124 13:41:13.872451 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d1847d7c-086b-4615-81d8-a6c5e915dcb4","Type":"ContainerStarted","Data":"ae0eca5db7fa5a6db3f86030981a181a3f278be921876cef647fba50254a5708"} Nov 24 13:41:13 crc kubenswrapper[5039]: I1124 13:41:13.877070 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d1847d7c-086b-4615-81d8-a6c5e915dcb4","Type":"ContainerStarted","Data":"10f05ac66373ed3b921e2ace8672c8116370f0146b156efd02949404e3b716a2"} Nov 24 13:41:14 crc kubenswrapper[5039]: I1124 13:41:14.047794 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-576959578d-mb556" Nov 24 13:41:14 crc kubenswrapper[5039]: I1124 13:41:14.406455 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 24 13:41:14 crc kubenswrapper[5039]: I1124 13:41:14.884860 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d1847d7c-086b-4615-81d8-a6c5e915dcb4","Type":"ContainerStarted","Data":"1aa52c6919c43005915ddd0dd49344bc024efe6ef833ba3a44f645d807a86411"} Nov 24 13:41:14 crc kubenswrapper[5039]: I1124 13:41:14.885233 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 24 13:41:14 crc kubenswrapper[5039]: I1124 13:41:14.909640 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.9096205360000003 podStartE2EDuration="3.909620536s" podCreationTimestamp="2025-11-24 13:41:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:41:14.907008822 +0000 UTC m=+1387.346133332" watchObservedRunningTime="2025-11-24 13:41:14.909620536 +0000 UTC m=+1387.348745056" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.666141 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-78dc996954-b5s9v"] Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.668135 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-78dc996954-b5s9v" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.672640 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.672806 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.689470 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-wclsb" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.700656 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-78dc996954-b5s9v"] Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.718947 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8625b618-e756-46ad-a646-c94e824a1e83-config-data\") pod \"heat-engine-78dc996954-b5s9v\" (UID: \"8625b618-e756-46ad-a646-c94e824a1e83\") " pod="openstack/heat-engine-78dc996954-b5s9v" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.719404 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcx8t\" (UniqueName: \"kubernetes.io/projected/8625b618-e756-46ad-a646-c94e824a1e83-kube-api-access-dcx8t\") pod \"heat-engine-78dc996954-b5s9v\" (UID: \"8625b618-e756-46ad-a646-c94e824a1e83\") " pod="openstack/heat-engine-78dc996954-b5s9v" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.719540 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8625b618-e756-46ad-a646-c94e824a1e83-combined-ca-bundle\") pod \"heat-engine-78dc996954-b5s9v\" (UID: \"8625b618-e756-46ad-a646-c94e824a1e83\") " pod="openstack/heat-engine-78dc996954-b5s9v" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.719662 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8625b618-e756-46ad-a646-c94e824a1e83-config-data-custom\") pod \"heat-engine-78dc996954-b5s9v\" (UID: \"8625b618-e756-46ad-a646-c94e824a1e83\") " pod="openstack/heat-engine-78dc996954-b5s9v" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.821273 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8625b618-e756-46ad-a646-c94e824a1e83-config-data\") pod \"heat-engine-78dc996954-b5s9v\" (UID: \"8625b618-e756-46ad-a646-c94e824a1e83\") " pod="openstack/heat-engine-78dc996954-b5s9v" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.821689 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dcx8t\" (UniqueName: \"kubernetes.io/projected/8625b618-e756-46ad-a646-c94e824a1e83-kube-api-access-dcx8t\") pod \"heat-engine-78dc996954-b5s9v\" (UID: \"8625b618-e756-46ad-a646-c94e824a1e83\") " pod="openstack/heat-engine-78dc996954-b5s9v" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.821723 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8625b618-e756-46ad-a646-c94e824a1e83-combined-ca-bundle\") pod \"heat-engine-78dc996954-b5s9v\" (UID: \"8625b618-e756-46ad-a646-c94e824a1e83\") " 
pod="openstack/heat-engine-78dc996954-b5s9v" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.821785 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8625b618-e756-46ad-a646-c94e824a1e83-config-data-custom\") pod \"heat-engine-78dc996954-b5s9v\" (UID: \"8625b618-e756-46ad-a646-c94e824a1e83\") " pod="openstack/heat-engine-78dc996954-b5s9v" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.830580 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-sfp6r"] Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.832932 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.855550 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-sfp6r"] Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.861722 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8625b618-e756-46ad-a646-c94e824a1e83-combined-ca-bundle\") pod \"heat-engine-78dc996954-b5s9v\" (UID: \"8625b618-e756-46ad-a646-c94e824a1e83\") " pod="openstack/heat-engine-78dc996954-b5s9v" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.863703 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8625b618-e756-46ad-a646-c94e824a1e83-config-data-custom\") pod \"heat-engine-78dc996954-b5s9v\" (UID: \"8625b618-e756-46ad-a646-c94e824a1e83\") " pod="openstack/heat-engine-78dc996954-b5s9v" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.864001 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8625b618-e756-46ad-a646-c94e824a1e83-config-data\") pod \"heat-engine-78dc996954-b5s9v\" (UID: \"8625b618-e756-46ad-a646-c94e824a1e83\") " pod="openstack/heat-engine-78dc996954-b5s9v" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.870367 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcx8t\" (UniqueName: \"kubernetes.io/projected/8625b618-e756-46ad-a646-c94e824a1e83-kube-api-access-dcx8t\") pod \"heat-engine-78dc996954-b5s9v\" (UID: \"8625b618-e756-46ad-a646-c94e824a1e83\") " pod="openstack/heat-engine-78dc996954-b5s9v" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.892691 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-554bdcfd44-79lq6"] Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.894221 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-554bdcfd44-79lq6" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.899813 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.914204 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.916006 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.920304 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-pgnzf" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.920477 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.920688 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.923678 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-sfp6r\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.923725 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fdafa1d4-1e80-420d-a2a1-4017bd9144be-config-data-custom\") pod \"heat-cfnapi-554bdcfd44-79lq6\" (UID: \"fdafa1d4-1e80-420d-a2a1-4017bd9144be\") " pod="openstack/heat-cfnapi-554bdcfd44-79lq6" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.923751 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6bjw\" (UniqueName: \"kubernetes.io/projected/fdafa1d4-1e80-420d-a2a1-4017bd9144be-kube-api-access-s6bjw\") pod \"heat-cfnapi-554bdcfd44-79lq6\" (UID: \"fdafa1d4-1e80-420d-a2a1-4017bd9144be\") " pod="openstack/heat-cfnapi-554bdcfd44-79lq6" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.923798 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-sfp6r\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.923856 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-config\") pod \"dnsmasq-dns-f6bc4c6c9-sfp6r\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.923916 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4srnb\" (UniqueName: \"kubernetes.io/projected/34b45a67-b6e3-40cb-ad22-52fc9e26292e-kube-api-access-4srnb\") pod \"dnsmasq-dns-f6bc4c6c9-sfp6r\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.923940 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdafa1d4-1e80-420d-a2a1-4017bd9144be-combined-ca-bundle\") pod \"heat-cfnapi-554bdcfd44-79lq6\" (UID: \"fdafa1d4-1e80-420d-a2a1-4017bd9144be\") " pod="openstack/heat-cfnapi-554bdcfd44-79lq6" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.923958 5039 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-sfp6r\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.923995 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdafa1d4-1e80-420d-a2a1-4017bd9144be-config-data\") pod \"heat-cfnapi-554bdcfd44-79lq6\" (UID: \"fdafa1d4-1e80-420d-a2a1-4017bd9144be\") " pod="openstack/heat-cfnapi-554bdcfd44-79lq6" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.924209 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-sfp6r\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.940305 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-554bdcfd44-79lq6"] Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.959943 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.970066 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-58cdd4bdc9-hd6w5"] Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.972098 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-58cdd4bdc9-hd6w5" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.974757 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.980035 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-58cdd4bdc9-hd6w5"] Nov 24 13:41:17 crc kubenswrapper[5039]: I1124 13:41:17.995802 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-78dc996954-b5s9v" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.025435 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4srnb\" (UniqueName: \"kubernetes.io/projected/34b45a67-b6e3-40cb-ad22-52fc9e26292e-kube-api-access-4srnb\") pod \"dnsmasq-dns-f6bc4c6c9-sfp6r\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.025483 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdafa1d4-1e80-420d-a2a1-4017bd9144be-combined-ca-bundle\") pod \"heat-cfnapi-554bdcfd44-79lq6\" (UID: \"fdafa1d4-1e80-420d-a2a1-4017bd9144be\") " pod="openstack/heat-cfnapi-554bdcfd44-79lq6" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.025527 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-sfp6r\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.025568 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6xvm\" (UniqueName: \"kubernetes.io/projected/8bb936ec-11da-428d-93ed-33745690864a-kube-api-access-q6xvm\") pod \"heat-api-58cdd4bdc9-hd6w5\" (UID: \"8bb936ec-11da-428d-93ed-33745690864a\") " pod="openstack/heat-api-58cdd4bdc9-hd6w5" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.025595 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bb936ec-11da-428d-93ed-33745690864a-combined-ca-bundle\") pod \"heat-api-58cdd4bdc9-hd6w5\" (UID: \"8bb936ec-11da-428d-93ed-33745690864a\") " pod="openstack/heat-api-58cdd4bdc9-hd6w5" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.025622 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bb936ec-11da-428d-93ed-33745690864a-config-data\") pod \"heat-api-58cdd4bdc9-hd6w5\" (UID: \"8bb936ec-11da-428d-93ed-33745690864a\") " pod="openstack/heat-api-58cdd4bdc9-hd6w5" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.025654 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdafa1d4-1e80-420d-a2a1-4017bd9144be-config-data\") pod \"heat-cfnapi-554bdcfd44-79lq6\" (UID: \"fdafa1d4-1e80-420d-a2a1-4017bd9144be\") " pod="openstack/heat-cfnapi-554bdcfd44-79lq6" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.025676 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-sfp6r\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.025701 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-sfp6r\" (UID: 
\"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.025730 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fdafa1d4-1e80-420d-a2a1-4017bd9144be-config-data-custom\") pod \"heat-cfnapi-554bdcfd44-79lq6\" (UID: \"fdafa1d4-1e80-420d-a2a1-4017bd9144be\") " pod="openstack/heat-cfnapi-554bdcfd44-79lq6" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.025750 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6bjw\" (UniqueName: \"kubernetes.io/projected/fdafa1d4-1e80-420d-a2a1-4017bd9144be-kube-api-access-s6bjw\") pod \"heat-cfnapi-554bdcfd44-79lq6\" (UID: \"fdafa1d4-1e80-420d-a2a1-4017bd9144be\") " pod="openstack/heat-cfnapi-554bdcfd44-79lq6" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.025772 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/bbd0fae4-aa15-46d2-8118-f738c3c1dc3c-openstack-config-secret\") pod \"openstackclient\" (UID: \"bbd0fae4-aa15-46d2-8118-f738c3c1dc3c\") " pod="openstack/openstackclient" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.025800 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhmcg\" (UniqueName: \"kubernetes.io/projected/bbd0fae4-aa15-46d2-8118-f738c3c1dc3c-kube-api-access-fhmcg\") pod \"openstackclient\" (UID: \"bbd0fae4-aa15-46d2-8118-f738c3c1dc3c\") " pod="openstack/openstackclient" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.025821 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbd0fae4-aa15-46d2-8118-f738c3c1dc3c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"bbd0fae4-aa15-46d2-8118-f738c3c1dc3c\") " pod="openstack/openstackclient" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.025838 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/bbd0fae4-aa15-46d2-8118-f738c3c1dc3c-openstack-config\") pod \"openstackclient\" (UID: \"bbd0fae4-aa15-46d2-8118-f738c3c1dc3c\") " pod="openstack/openstackclient" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.025858 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-sfp6r\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.025914 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-config\") pod \"dnsmasq-dns-f6bc4c6c9-sfp6r\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.025951 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8bb936ec-11da-428d-93ed-33745690864a-config-data-custom\") pod \"heat-api-58cdd4bdc9-hd6w5\" (UID: 
\"8bb936ec-11da-428d-93ed-33745690864a\") " pod="openstack/heat-api-58cdd4bdc9-hd6w5" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.026662 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-sfp6r\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.027309 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-sfp6r\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.027865 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-sfp6r\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.028468 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-sfp6r\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.029291 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-config\") pod \"dnsmasq-dns-f6bc4c6c9-sfp6r\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.033240 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fdafa1d4-1e80-420d-a2a1-4017bd9144be-config-data-custom\") pod \"heat-cfnapi-554bdcfd44-79lq6\" (UID: \"fdafa1d4-1e80-420d-a2a1-4017bd9144be\") " pod="openstack/heat-cfnapi-554bdcfd44-79lq6" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.035054 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdafa1d4-1e80-420d-a2a1-4017bd9144be-config-data\") pod \"heat-cfnapi-554bdcfd44-79lq6\" (UID: \"fdafa1d4-1e80-420d-a2a1-4017bd9144be\") " pod="openstack/heat-cfnapi-554bdcfd44-79lq6" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.036333 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdafa1d4-1e80-420d-a2a1-4017bd9144be-combined-ca-bundle\") pod \"heat-cfnapi-554bdcfd44-79lq6\" (UID: \"fdafa1d4-1e80-420d-a2a1-4017bd9144be\") " pod="openstack/heat-cfnapi-554bdcfd44-79lq6" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.056380 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4srnb\" (UniqueName: \"kubernetes.io/projected/34b45a67-b6e3-40cb-ad22-52fc9e26292e-kube-api-access-4srnb\") pod \"dnsmasq-dns-f6bc4c6c9-sfp6r\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 
13:41:18.069276 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6bjw\" (UniqueName: \"kubernetes.io/projected/fdafa1d4-1e80-420d-a2a1-4017bd9144be-kube-api-access-s6bjw\") pod \"heat-cfnapi-554bdcfd44-79lq6\" (UID: \"fdafa1d4-1e80-420d-a2a1-4017bd9144be\") " pod="openstack/heat-cfnapi-554bdcfd44-79lq6" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.135457 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8bb936ec-11da-428d-93ed-33745690864a-config-data-custom\") pod \"heat-api-58cdd4bdc9-hd6w5\" (UID: \"8bb936ec-11da-428d-93ed-33745690864a\") " pod="openstack/heat-api-58cdd4bdc9-hd6w5" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.136009 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6xvm\" (UniqueName: \"kubernetes.io/projected/8bb936ec-11da-428d-93ed-33745690864a-kube-api-access-q6xvm\") pod \"heat-api-58cdd4bdc9-hd6w5\" (UID: \"8bb936ec-11da-428d-93ed-33745690864a\") " pod="openstack/heat-api-58cdd4bdc9-hd6w5" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.136036 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bb936ec-11da-428d-93ed-33745690864a-combined-ca-bundle\") pod \"heat-api-58cdd4bdc9-hd6w5\" (UID: \"8bb936ec-11da-428d-93ed-33745690864a\") " pod="openstack/heat-api-58cdd4bdc9-hd6w5" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.136071 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bb936ec-11da-428d-93ed-33745690864a-config-data\") pod \"heat-api-58cdd4bdc9-hd6w5\" (UID: \"8bb936ec-11da-428d-93ed-33745690864a\") " pod="openstack/heat-api-58cdd4bdc9-hd6w5" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.136220 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/bbd0fae4-aa15-46d2-8118-f738c3c1dc3c-openstack-config-secret\") pod \"openstackclient\" (UID: \"bbd0fae4-aa15-46d2-8118-f738c3c1dc3c\") " pod="openstack/openstackclient" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.136262 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhmcg\" (UniqueName: \"kubernetes.io/projected/bbd0fae4-aa15-46d2-8118-f738c3c1dc3c-kube-api-access-fhmcg\") pod \"openstackclient\" (UID: \"bbd0fae4-aa15-46d2-8118-f738c3c1dc3c\") " pod="openstack/openstackclient" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.136295 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbd0fae4-aa15-46d2-8118-f738c3c1dc3c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"bbd0fae4-aa15-46d2-8118-f738c3c1dc3c\") " pod="openstack/openstackclient" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.136321 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/bbd0fae4-aa15-46d2-8118-f738c3c1dc3c-openstack-config\") pod \"openstackclient\" (UID: \"bbd0fae4-aa15-46d2-8118-f738c3c1dc3c\") " pod="openstack/openstackclient" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.137742 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: 
\"kubernetes.io/configmap/bbd0fae4-aa15-46d2-8118-f738c3c1dc3c-openstack-config\") pod \"openstackclient\" (UID: \"bbd0fae4-aa15-46d2-8118-f738c3c1dc3c\") " pod="openstack/openstackclient" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.149209 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bb936ec-11da-428d-93ed-33745690864a-combined-ca-bundle\") pod \"heat-api-58cdd4bdc9-hd6w5\" (UID: \"8bb936ec-11da-428d-93ed-33745690864a\") " pod="openstack/heat-api-58cdd4bdc9-hd6w5" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.154851 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8bb936ec-11da-428d-93ed-33745690864a-config-data-custom\") pod \"heat-api-58cdd4bdc9-hd6w5\" (UID: \"8bb936ec-11da-428d-93ed-33745690864a\") " pod="openstack/heat-api-58cdd4bdc9-hd6w5" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.156970 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/bbd0fae4-aa15-46d2-8118-f738c3c1dc3c-openstack-config-secret\") pod \"openstackclient\" (UID: \"bbd0fae4-aa15-46d2-8118-f738c3c1dc3c\") " pod="openstack/openstackclient" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.157017 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbd0fae4-aa15-46d2-8118-f738c3c1dc3c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"bbd0fae4-aa15-46d2-8118-f738c3c1dc3c\") " pod="openstack/openstackclient" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.159409 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bb936ec-11da-428d-93ed-33745690864a-config-data\") pod \"heat-api-58cdd4bdc9-hd6w5\" (UID: \"8bb936ec-11da-428d-93ed-33745690864a\") " pod="openstack/heat-api-58cdd4bdc9-hd6w5" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.163456 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q6xvm\" (UniqueName: \"kubernetes.io/projected/8bb936ec-11da-428d-93ed-33745690864a-kube-api-access-q6xvm\") pod \"heat-api-58cdd4bdc9-hd6w5\" (UID: \"8bb936ec-11da-428d-93ed-33745690864a\") " pod="openstack/heat-api-58cdd4bdc9-hd6w5" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.171620 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhmcg\" (UniqueName: \"kubernetes.io/projected/bbd0fae4-aa15-46d2-8118-f738c3c1dc3c-kube-api-access-fhmcg\") pod \"openstackclient\" (UID: \"bbd0fae4-aa15-46d2-8118-f738c3c1dc3c\") " pod="openstack/openstackclient" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.233936 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.247436 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-58cdd4bdc9-hd6w5" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.275459 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.302029 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.325114 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-554bdcfd44-79lq6" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.446515 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7c65477b5b-lzp7p" Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.565635 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-cd9b99d5d-xfvxc"] Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.566072 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-cd9b99d5d-xfvxc" podUID="b291da35-220c-4325-bef8-891d00c483cb" containerName="barbican-api-log" containerID="cri-o://fa7ab55ab582921b4ab93b986000346106e4b854c36bf820f9845d4255a0ac1a" gracePeriod=30 Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.566618 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-cd9b99d5d-xfvxc" podUID="b291da35-220c-4325-bef8-891d00c483cb" containerName="barbican-api" containerID="cri-o://eccb1942bd9f6301d01ef443a846abf033d43febca3c9bb34f22a1126bdab967" gracePeriod=30 Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.592302 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-78dc996954-b5s9v"] Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.880599 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.964823 5039 generic.go:334] "Generic (PLEG): container finished" podID="b291da35-220c-4325-bef8-891d00c483cb" containerID="fa7ab55ab582921b4ab93b986000346106e4b854c36bf820f9845d4255a0ac1a" exitCode=143 Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.964968 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-cd9b99d5d-xfvxc" event={"ID":"b291da35-220c-4325-bef8-891d00c483cb","Type":"ContainerDied","Data":"fa7ab55ab582921b4ab93b986000346106e4b854c36bf820f9845d4255a0ac1a"} Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.977792 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"bbd0fae4-aa15-46d2-8118-f738c3c1dc3c","Type":"ContainerStarted","Data":"0585ebc9a7f41b0c1059694ae4d88c44b1fce88ecb7fbfe4cc78bfcc2bd4abca"} Nov 24 13:41:18 crc kubenswrapper[5039]: I1124 13:41:18.992676 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-78dc996954-b5s9v" event={"ID":"8625b618-e756-46ad-a646-c94e824a1e83","Type":"ContainerStarted","Data":"f3e0f8e511fe49088ea2841775ae09bcbb861af077f09a9e4127f93d23ed74e3"} Nov 24 13:41:19 crc kubenswrapper[5039]: I1124 13:41:19.019264 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-58cdd4bdc9-hd6w5"] Nov 24 13:41:19 crc kubenswrapper[5039]: W1124 13:41:19.035702 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8bb936ec_11da_428d_93ed_33745690864a.slice/crio-3a471328be1bf8d72c78e1fb1764afe1fc8c88c920cb3e2354d04cd92dc8d4bc WatchSource:0}: Error finding container 3a471328be1bf8d72c78e1fb1764afe1fc8c88c920cb3e2354d04cd92dc8d4bc: Status 404 returned error can't find the container with id 3a471328be1bf8d72c78e1fb1764afe1fc8c88c920cb3e2354d04cd92dc8d4bc Nov 24 13:41:19 crc 
kubenswrapper[5039]: I1124 13:41:19.184361 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-sfp6r"] Nov 24 13:41:19 crc kubenswrapper[5039]: I1124 13:41:19.464197 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-554bdcfd44-79lq6"] Nov 24 13:41:19 crc kubenswrapper[5039]: W1124 13:41:19.474461 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfdafa1d4_1e80_420d_a2a1_4017bd9144be.slice/crio-5d356064884c86b86ca0d54702d0914e708e78c337129e9c72e8f0bdd9c0fdea WatchSource:0}: Error finding container 5d356064884c86b86ca0d54702d0914e708e78c337129e9c72e8f0bdd9c0fdea: Status 404 returned error can't find the container with id 5d356064884c86b86ca0d54702d0914e708e78c337129e9c72e8f0bdd9c0fdea Nov 24 13:41:19 crc kubenswrapper[5039]: I1124 13:41:19.813972 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 24 13:41:20 crc kubenswrapper[5039]: I1124 13:41:20.004951 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-58cdd4bdc9-hd6w5" event={"ID":"8bb936ec-11da-428d-93ed-33745690864a","Type":"ContainerStarted","Data":"3a471328be1bf8d72c78e1fb1764afe1fc8c88c920cb3e2354d04cd92dc8d4bc"} Nov 24 13:41:20 crc kubenswrapper[5039]: I1124 13:41:20.007238 5039 generic.go:334] "Generic (PLEG): container finished" podID="34b45a67-b6e3-40cb-ad22-52fc9e26292e" containerID="e6cf42989e392131e09aace2b212e20e38fffa2c02da6271ff4e65c554a88e2d" exitCode=0 Nov 24 13:41:20 crc kubenswrapper[5039]: I1124 13:41:20.007319 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" event={"ID":"34b45a67-b6e3-40cb-ad22-52fc9e26292e","Type":"ContainerDied","Data":"e6cf42989e392131e09aace2b212e20e38fffa2c02da6271ff4e65c554a88e2d"} Nov 24 13:41:20 crc kubenswrapper[5039]: I1124 13:41:20.007358 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" event={"ID":"34b45a67-b6e3-40cb-ad22-52fc9e26292e","Type":"ContainerStarted","Data":"473ea016fe3b210d1934ec388f19d86f671755386615b7fca8303f0654276c84"} Nov 24 13:41:20 crc kubenswrapper[5039]: I1124 13:41:20.010460 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-78dc996954-b5s9v" event={"ID":"8625b618-e756-46ad-a646-c94e824a1e83","Type":"ContainerStarted","Data":"cc040e658a5e864f49b6a89bf502538beec554a4a79a4c2d235f2963492cdd7e"} Nov 24 13:41:20 crc kubenswrapper[5039]: I1124 13:41:20.011457 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-78dc996954-b5s9v" Nov 24 13:41:20 crc kubenswrapper[5039]: I1124 13:41:20.013298 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-554bdcfd44-79lq6" event={"ID":"fdafa1d4-1e80-420d-a2a1-4017bd9144be","Type":"ContainerStarted","Data":"5d356064884c86b86ca0d54702d0914e708e78c337129e9c72e8f0bdd9c0fdea"} Nov 24 13:41:20 crc kubenswrapper[5039]: I1124 13:41:20.063332 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-78dc996954-b5s9v" podStartSLOduration=3.063316402 podStartE2EDuration="3.063316402s" podCreationTimestamp="2025-11-24 13:41:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:41:20.059370265 +0000 UTC m=+1392.498494765" watchObservedRunningTime="2025-11-24 
13:41:20.063316402 +0000 UTC m=+1392.502440902" Nov 24 13:41:21 crc kubenswrapper[5039]: I1124 13:41:21.039435 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" event={"ID":"34b45a67-b6e3-40cb-ad22-52fc9e26292e","Type":"ContainerStarted","Data":"c756125ac99fa60a53a8cc12394dc3de2a94ea95034fd695efb28614da375b93"} Nov 24 13:41:21 crc kubenswrapper[5039]: I1124 13:41:21.063321 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" podStartSLOduration=4.063297715 podStartE2EDuration="4.063297715s" podCreationTimestamp="2025-11-24 13:41:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:41:21.061366737 +0000 UTC m=+1393.500491237" watchObservedRunningTime="2025-11-24 13:41:21.063297715 +0000 UTC m=+1393.502422215" Nov 24 13:41:22 crc kubenswrapper[5039]: I1124 13:41:22.050451 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:22 crc kubenswrapper[5039]: I1124 13:41:22.240205 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-cd9b99d5d-xfvxc" podUID="b291da35-220c-4325-bef8-891d00c483cb" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.183:9311/healthcheck\": read tcp 10.217.0.2:33084->10.217.0.183:9311: read: connection reset by peer" Nov 24 13:41:22 crc kubenswrapper[5039]: I1124 13:41:22.240260 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-cd9b99d5d-xfvxc" podUID="b291da35-220c-4325-bef8-891d00c483cb" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.183:9311/healthcheck\": read tcp 10.217.0.2:33072->10.217.0.183:9311: read: connection reset by peer" Nov 24 13:41:22 crc kubenswrapper[5039]: I1124 13:41:22.689394 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-c4b94795b-c6c2f" Nov 24 13:41:23 crc kubenswrapper[5039]: I1124 13:41:23.065011 5039 generic.go:334] "Generic (PLEG): container finished" podID="b291da35-220c-4325-bef8-891d00c483cb" containerID="eccb1942bd9f6301d01ef443a846abf033d43febca3c9bb34f22a1126bdab967" exitCode=0 Nov 24 13:41:23 crc kubenswrapper[5039]: I1124 13:41:23.065117 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-cd9b99d5d-xfvxc" event={"ID":"b291da35-220c-4325-bef8-891d00c483cb","Type":"ContainerDied","Data":"eccb1942bd9f6301d01ef443a846abf033d43febca3c9bb34f22a1126bdab967"} Nov 24 13:41:23 crc kubenswrapper[5039]: I1124 13:41:23.552765 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:41:23 crc kubenswrapper[5039]: I1124 13:41:23.578870 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b291da35-220c-4325-bef8-891d00c483cb-config-data-custom\") pod \"b291da35-220c-4325-bef8-891d00c483cb\" (UID: \"b291da35-220c-4325-bef8-891d00c483cb\") " Nov 24 13:41:23 crc kubenswrapper[5039]: I1124 13:41:23.579011 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tjn4x\" (UniqueName: \"kubernetes.io/projected/b291da35-220c-4325-bef8-891d00c483cb-kube-api-access-tjn4x\") pod \"b291da35-220c-4325-bef8-891d00c483cb\" (UID: \"b291da35-220c-4325-bef8-891d00c483cb\") " Nov 24 13:41:23 crc kubenswrapper[5039]: I1124 13:41:23.579097 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b291da35-220c-4325-bef8-891d00c483cb-combined-ca-bundle\") pod \"b291da35-220c-4325-bef8-891d00c483cb\" (UID: \"b291da35-220c-4325-bef8-891d00c483cb\") " Nov 24 13:41:23 crc kubenswrapper[5039]: I1124 13:41:23.579165 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b291da35-220c-4325-bef8-891d00c483cb-logs\") pod \"b291da35-220c-4325-bef8-891d00c483cb\" (UID: \"b291da35-220c-4325-bef8-891d00c483cb\") " Nov 24 13:41:23 crc kubenswrapper[5039]: I1124 13:41:23.579246 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b291da35-220c-4325-bef8-891d00c483cb-config-data\") pod \"b291da35-220c-4325-bef8-891d00c483cb\" (UID: \"b291da35-220c-4325-bef8-891d00c483cb\") " Nov 24 13:41:23 crc kubenswrapper[5039]: I1124 13:41:23.588143 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b291da35-220c-4325-bef8-891d00c483cb-logs" (OuterVolumeSpecName: "logs") pod "b291da35-220c-4325-bef8-891d00c483cb" (UID: "b291da35-220c-4325-bef8-891d00c483cb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:41:23 crc kubenswrapper[5039]: I1124 13:41:23.600316 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b291da35-220c-4325-bef8-891d00c483cb-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b291da35-220c-4325-bef8-891d00c483cb" (UID: "b291da35-220c-4325-bef8-891d00c483cb"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:41:23 crc kubenswrapper[5039]: I1124 13:41:23.616434 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b291da35-220c-4325-bef8-891d00c483cb-kube-api-access-tjn4x" (OuterVolumeSpecName: "kube-api-access-tjn4x") pod "b291da35-220c-4325-bef8-891d00c483cb" (UID: "b291da35-220c-4325-bef8-891d00c483cb"). InnerVolumeSpecName "kube-api-access-tjn4x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:41:23 crc kubenswrapper[5039]: I1124 13:41:23.681674 5039 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b291da35-220c-4325-bef8-891d00c483cb-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:23 crc kubenswrapper[5039]: I1124 13:41:23.681928 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tjn4x\" (UniqueName: \"kubernetes.io/projected/b291da35-220c-4325-bef8-891d00c483cb-kube-api-access-tjn4x\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:23 crc kubenswrapper[5039]: I1124 13:41:23.681940 5039 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b291da35-220c-4325-bef8-891d00c483cb-logs\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:23 crc kubenswrapper[5039]: I1124 13:41:23.719165 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b291da35-220c-4325-bef8-891d00c483cb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b291da35-220c-4325-bef8-891d00c483cb" (UID: "b291da35-220c-4325-bef8-891d00c483cb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:41:23 crc kubenswrapper[5039]: I1124 13:41:23.752704 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b291da35-220c-4325-bef8-891d00c483cb-config-data" (OuterVolumeSpecName: "config-data") pod "b291da35-220c-4325-bef8-891d00c483cb" (UID: "b291da35-220c-4325-bef8-891d00c483cb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:41:23 crc kubenswrapper[5039]: I1124 13:41:23.784064 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b291da35-220c-4325-bef8-891d00c483cb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:23 crc kubenswrapper[5039]: I1124 13:41:23.784104 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b291da35-220c-4325-bef8-891d00c483cb-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:24 crc kubenswrapper[5039]: I1124 13:41:24.090692 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-554bdcfd44-79lq6" event={"ID":"fdafa1d4-1e80-420d-a2a1-4017bd9144be","Type":"ContainerStarted","Data":"fb38b8bf941b98e0d62d0bb1454a7e7f94676c78c2fe9268917699ed00e067d1"} Nov 24 13:41:24 crc kubenswrapper[5039]: I1124 13:41:24.091847 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-554bdcfd44-79lq6" Nov 24 13:41:24 crc kubenswrapper[5039]: I1124 13:41:24.108182 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-cd9b99d5d-xfvxc" event={"ID":"b291da35-220c-4325-bef8-891d00c483cb","Type":"ContainerDied","Data":"aecd07853f12f36c3cb95266ebe29032fd9760f8bf296744259357692996c74e"} Nov 24 13:41:24 crc kubenswrapper[5039]: I1124 13:41:24.108233 5039 scope.go:117] "RemoveContainer" containerID="eccb1942bd9f6301d01ef443a846abf033d43febca3c9bb34f22a1126bdab967" Nov 24 13:41:24 crc kubenswrapper[5039]: I1124 13:41:24.108352 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-cd9b99d5d-xfvxc" Nov 24 13:41:24 crc kubenswrapper[5039]: I1124 13:41:24.118041 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-58cdd4bdc9-hd6w5" event={"ID":"8bb936ec-11da-428d-93ed-33745690864a","Type":"ContainerStarted","Data":"9b027226f77dc29632f20c9f4ae36818bb0eeb4466f11dc8c62464099d548b0d"} Nov 24 13:41:24 crc kubenswrapper[5039]: I1124 13:41:24.120027 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-58cdd4bdc9-hd6w5" Nov 24 13:41:24 crc kubenswrapper[5039]: I1124 13:41:24.129342 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-554bdcfd44-79lq6" podStartSLOduration=3.439194372 podStartE2EDuration="7.129324973s" podCreationTimestamp="2025-11-24 13:41:17 +0000 UTC" firstStartedPulling="2025-11-24 13:41:19.491859538 +0000 UTC m=+1391.930984038" lastFinishedPulling="2025-11-24 13:41:23.181990149 +0000 UTC m=+1395.621114639" observedRunningTime="2025-11-24 13:41:24.107041133 +0000 UTC m=+1396.546165643" watchObservedRunningTime="2025-11-24 13:41:24.129324973 +0000 UTC m=+1396.568449473" Nov 24 13:41:24 crc kubenswrapper[5039]: I1124 13:41:24.158484 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-58cdd4bdc9-hd6w5" podStartSLOduration=3.088688648 podStartE2EDuration="7.158471132s" podCreationTimestamp="2025-11-24 13:41:17 +0000 UTC" firstStartedPulling="2025-11-24 13:41:19.101834599 +0000 UTC m=+1391.540959099" lastFinishedPulling="2025-11-24 13:41:23.171617083 +0000 UTC m=+1395.610741583" observedRunningTime="2025-11-24 13:41:24.155429177 +0000 UTC m=+1396.594553677" watchObservedRunningTime="2025-11-24 13:41:24.158471132 +0000 UTC m=+1396.597595632" Nov 24 13:41:24 crc kubenswrapper[5039]: I1124 13:41:24.178971 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-cd9b99d5d-xfvxc"] Nov 24 13:41:24 crc kubenswrapper[5039]: I1124 13:41:24.201268 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-cd9b99d5d-xfvxc"] Nov 24 13:41:24 crc kubenswrapper[5039]: I1124 13:41:24.221716 5039 scope.go:117] "RemoveContainer" containerID="fa7ab55ab582921b4ab93b986000346106e4b854c36bf820f9845d4255a0ac1a" Nov 24 13:41:24 crc kubenswrapper[5039]: I1124 13:41:24.321567 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b291da35-220c-4325-bef8-891d00c483cb" path="/var/lib/kubelet/pods/b291da35-220c-4325-bef8-891d00c483cb/volumes" Nov 24 13:41:25 crc kubenswrapper[5039]: I1124 13:41:25.547653 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.147066 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-75cf4567b8-dlwgp"] Nov 24 13:41:26 crc kubenswrapper[5039]: E1124 13:41:26.147738 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b291da35-220c-4325-bef8-891d00c483cb" containerName="barbican-api" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.147756 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b291da35-220c-4325-bef8-891d00c483cb" containerName="barbican-api" Nov 24 13:41:26 crc kubenswrapper[5039]: E1124 13:41:26.147786 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b291da35-220c-4325-bef8-891d00c483cb" containerName="barbican-api-log" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.147793 5039 
state_mem.go:107] "Deleted CPUSet assignment" podUID="b291da35-220c-4325-bef8-891d00c483cb" containerName="barbican-api-log" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.147976 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="b291da35-220c-4325-bef8-891d00c483cb" containerName="barbican-api" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.148003 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="b291da35-220c-4325-bef8-891d00c483cb" containerName="barbican-api-log" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.148768 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-75cf4567b8-dlwgp" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.164495 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-75cf4567b8-dlwgp"] Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.178646 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-86b5f44b95-4qtzc"] Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.179983 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-86b5f44b95-4qtzc" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.257990 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ed94ecdc-8218-45f3-b908-7a2410b57196-config-data-custom\") pod \"heat-engine-75cf4567b8-dlwgp\" (UID: \"ed94ecdc-8218-45f3-b908-7a2410b57196\") " pod="openstack/heat-engine-75cf4567b8-dlwgp" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.258073 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed94ecdc-8218-45f3-b908-7a2410b57196-config-data\") pod \"heat-engine-75cf4567b8-dlwgp\" (UID: \"ed94ecdc-8218-45f3-b908-7a2410b57196\") " pod="openstack/heat-engine-75cf4567b8-dlwgp" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.258092 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09227c0c-ba64-4216-8bac-a8c0f88706c3-config-data-custom\") pod \"heat-api-86b5f44b95-4qtzc\" (UID: \"09227c0c-ba64-4216-8bac-a8c0f88706c3\") " pod="openstack/heat-api-86b5f44b95-4qtzc" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.258126 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed94ecdc-8218-45f3-b908-7a2410b57196-combined-ca-bundle\") pod \"heat-engine-75cf4567b8-dlwgp\" (UID: \"ed94ecdc-8218-45f3-b908-7a2410b57196\") " pod="openstack/heat-engine-75cf4567b8-dlwgp" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.258183 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09227c0c-ba64-4216-8bac-a8c0f88706c3-combined-ca-bundle\") pod \"heat-api-86b5f44b95-4qtzc\" (UID: \"09227c0c-ba64-4216-8bac-a8c0f88706c3\") " pod="openstack/heat-api-86b5f44b95-4qtzc" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.258230 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpnfl\" (UniqueName: \"kubernetes.io/projected/09227c0c-ba64-4216-8bac-a8c0f88706c3-kube-api-access-xpnfl\") pod 
\"heat-api-86b5f44b95-4qtzc\" (UID: \"09227c0c-ba64-4216-8bac-a8c0f88706c3\") " pod="openstack/heat-api-86b5f44b95-4qtzc" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.258318 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09227c0c-ba64-4216-8bac-a8c0f88706c3-config-data\") pod \"heat-api-86b5f44b95-4qtzc\" (UID: \"09227c0c-ba64-4216-8bac-a8c0f88706c3\") " pod="openstack/heat-api-86b5f44b95-4qtzc" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.258408 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfc8f\" (UniqueName: \"kubernetes.io/projected/ed94ecdc-8218-45f3-b908-7a2410b57196-kube-api-access-cfc8f\") pod \"heat-engine-75cf4567b8-dlwgp\" (UID: \"ed94ecdc-8218-45f3-b908-7a2410b57196\") " pod="openstack/heat-engine-75cf4567b8-dlwgp" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.272600 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-86b5f44b95-4qtzc"] Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.328359 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-c9b799fcd-wrw68"] Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.337004 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-c9b799fcd-wrw68" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.340927 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-c9b799fcd-wrw68"] Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.360243 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfc8f\" (UniqueName: \"kubernetes.io/projected/ed94ecdc-8218-45f3-b908-7a2410b57196-kube-api-access-cfc8f\") pod \"heat-engine-75cf4567b8-dlwgp\" (UID: \"ed94ecdc-8218-45f3-b908-7a2410b57196\") " pod="openstack/heat-engine-75cf4567b8-dlwgp" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.360361 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ed94ecdc-8218-45f3-b908-7a2410b57196-config-data-custom\") pod \"heat-engine-75cf4567b8-dlwgp\" (UID: \"ed94ecdc-8218-45f3-b908-7a2410b57196\") " pod="openstack/heat-engine-75cf4567b8-dlwgp" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.360407 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed94ecdc-8218-45f3-b908-7a2410b57196-config-data\") pod \"heat-engine-75cf4567b8-dlwgp\" (UID: \"ed94ecdc-8218-45f3-b908-7a2410b57196\") " pod="openstack/heat-engine-75cf4567b8-dlwgp" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.360463 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09227c0c-ba64-4216-8bac-a8c0f88706c3-config-data-custom\") pod \"heat-api-86b5f44b95-4qtzc\" (UID: \"09227c0c-ba64-4216-8bac-a8c0f88706c3\") " pod="openstack/heat-api-86b5f44b95-4qtzc" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.360518 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed94ecdc-8218-45f3-b908-7a2410b57196-combined-ca-bundle\") pod \"heat-engine-75cf4567b8-dlwgp\" (UID: \"ed94ecdc-8218-45f3-b908-7a2410b57196\") " 
pod="openstack/heat-engine-75cf4567b8-dlwgp" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.360557 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09227c0c-ba64-4216-8bac-a8c0f88706c3-combined-ca-bundle\") pod \"heat-api-86b5f44b95-4qtzc\" (UID: \"09227c0c-ba64-4216-8bac-a8c0f88706c3\") " pod="openstack/heat-api-86b5f44b95-4qtzc" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.360598 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpnfl\" (UniqueName: \"kubernetes.io/projected/09227c0c-ba64-4216-8bac-a8c0f88706c3-kube-api-access-xpnfl\") pod \"heat-api-86b5f44b95-4qtzc\" (UID: \"09227c0c-ba64-4216-8bac-a8c0f88706c3\") " pod="openstack/heat-api-86b5f44b95-4qtzc" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.360708 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09227c0c-ba64-4216-8bac-a8c0f88706c3-config-data\") pod \"heat-api-86b5f44b95-4qtzc\" (UID: \"09227c0c-ba64-4216-8bac-a8c0f88706c3\") " pod="openstack/heat-api-86b5f44b95-4qtzc" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.370233 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ed94ecdc-8218-45f3-b908-7a2410b57196-config-data-custom\") pod \"heat-engine-75cf4567b8-dlwgp\" (UID: \"ed94ecdc-8218-45f3-b908-7a2410b57196\") " pod="openstack/heat-engine-75cf4567b8-dlwgp" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.371274 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09227c0c-ba64-4216-8bac-a8c0f88706c3-config-data\") pod \"heat-api-86b5f44b95-4qtzc\" (UID: \"09227c0c-ba64-4216-8bac-a8c0f88706c3\") " pod="openstack/heat-api-86b5f44b95-4qtzc" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.372933 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed94ecdc-8218-45f3-b908-7a2410b57196-combined-ca-bundle\") pod \"heat-engine-75cf4567b8-dlwgp\" (UID: \"ed94ecdc-8218-45f3-b908-7a2410b57196\") " pod="openstack/heat-engine-75cf4567b8-dlwgp" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.376391 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed94ecdc-8218-45f3-b908-7a2410b57196-config-data\") pod \"heat-engine-75cf4567b8-dlwgp\" (UID: \"ed94ecdc-8218-45f3-b908-7a2410b57196\") " pod="openstack/heat-engine-75cf4567b8-dlwgp" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.378143 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09227c0c-ba64-4216-8bac-a8c0f88706c3-combined-ca-bundle\") pod \"heat-api-86b5f44b95-4qtzc\" (UID: \"09227c0c-ba64-4216-8bac-a8c0f88706c3\") " pod="openstack/heat-api-86b5f44b95-4qtzc" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.379599 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09227c0c-ba64-4216-8bac-a8c0f88706c3-config-data-custom\") pod \"heat-api-86b5f44b95-4qtzc\" (UID: \"09227c0c-ba64-4216-8bac-a8c0f88706c3\") " pod="openstack/heat-api-86b5f44b95-4qtzc" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.389732 5039 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfc8f\" (UniqueName: \"kubernetes.io/projected/ed94ecdc-8218-45f3-b908-7a2410b57196-kube-api-access-cfc8f\") pod \"heat-engine-75cf4567b8-dlwgp\" (UID: \"ed94ecdc-8218-45f3-b908-7a2410b57196\") " pod="openstack/heat-engine-75cf4567b8-dlwgp" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.390813 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpnfl\" (UniqueName: \"kubernetes.io/projected/09227c0c-ba64-4216-8bac-a8c0f88706c3-kube-api-access-xpnfl\") pod \"heat-api-86b5f44b95-4qtzc\" (UID: \"09227c0c-ba64-4216-8bac-a8c0f88706c3\") " pod="openstack/heat-api-86b5f44b95-4qtzc" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.463087 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/291fbee7-ab14-439c-9bfc-845225d607ae-combined-ca-bundle\") pod \"heat-cfnapi-c9b799fcd-wrw68\" (UID: \"291fbee7-ab14-439c-9bfc-845225d607ae\") " pod="openstack/heat-cfnapi-c9b799fcd-wrw68" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.463131 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/291fbee7-ab14-439c-9bfc-845225d607ae-config-data\") pod \"heat-cfnapi-c9b799fcd-wrw68\" (UID: \"291fbee7-ab14-439c-9bfc-845225d607ae\") " pod="openstack/heat-cfnapi-c9b799fcd-wrw68" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.463207 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/291fbee7-ab14-439c-9bfc-845225d607ae-config-data-custom\") pod \"heat-cfnapi-c9b799fcd-wrw68\" (UID: \"291fbee7-ab14-439c-9bfc-845225d607ae\") " pod="openstack/heat-cfnapi-c9b799fcd-wrw68" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.463419 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhchp\" (UniqueName: \"kubernetes.io/projected/291fbee7-ab14-439c-9bfc-845225d607ae-kube-api-access-qhchp\") pod \"heat-cfnapi-c9b799fcd-wrw68\" (UID: \"291fbee7-ab14-439c-9bfc-845225d607ae\") " pod="openstack/heat-cfnapi-c9b799fcd-wrw68" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.467177 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-75cf4567b8-dlwgp" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.529244 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-86b5f44b95-4qtzc" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.565618 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/291fbee7-ab14-439c-9bfc-845225d607ae-config-data-custom\") pod \"heat-cfnapi-c9b799fcd-wrw68\" (UID: \"291fbee7-ab14-439c-9bfc-845225d607ae\") " pod="openstack/heat-cfnapi-c9b799fcd-wrw68" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.566013 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhchp\" (UniqueName: \"kubernetes.io/projected/291fbee7-ab14-439c-9bfc-845225d607ae-kube-api-access-qhchp\") pod \"heat-cfnapi-c9b799fcd-wrw68\" (UID: \"291fbee7-ab14-439c-9bfc-845225d607ae\") " pod="openstack/heat-cfnapi-c9b799fcd-wrw68" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.566097 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/291fbee7-ab14-439c-9bfc-845225d607ae-combined-ca-bundle\") pod \"heat-cfnapi-c9b799fcd-wrw68\" (UID: \"291fbee7-ab14-439c-9bfc-845225d607ae\") " pod="openstack/heat-cfnapi-c9b799fcd-wrw68" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.566114 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/291fbee7-ab14-439c-9bfc-845225d607ae-config-data\") pod \"heat-cfnapi-c9b799fcd-wrw68\" (UID: \"291fbee7-ab14-439c-9bfc-845225d607ae\") " pod="openstack/heat-cfnapi-c9b799fcd-wrw68" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.571692 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/291fbee7-ab14-439c-9bfc-845225d607ae-combined-ca-bundle\") pod \"heat-cfnapi-c9b799fcd-wrw68\" (UID: \"291fbee7-ab14-439c-9bfc-845225d607ae\") " pod="openstack/heat-cfnapi-c9b799fcd-wrw68" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.571818 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/291fbee7-ab14-439c-9bfc-845225d607ae-config-data-custom\") pod \"heat-cfnapi-c9b799fcd-wrw68\" (UID: \"291fbee7-ab14-439c-9bfc-845225d607ae\") " pod="openstack/heat-cfnapi-c9b799fcd-wrw68" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.574331 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/291fbee7-ab14-439c-9bfc-845225d607ae-config-data\") pod \"heat-cfnapi-c9b799fcd-wrw68\" (UID: \"291fbee7-ab14-439c-9bfc-845225d607ae\") " pod="openstack/heat-cfnapi-c9b799fcd-wrw68" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.590475 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhchp\" (UniqueName: \"kubernetes.io/projected/291fbee7-ab14-439c-9bfc-845225d607ae-kube-api-access-qhchp\") pod \"heat-cfnapi-c9b799fcd-wrw68\" (UID: \"291fbee7-ab14-439c-9bfc-845225d607ae\") " pod="openstack/heat-cfnapi-c9b799fcd-wrw68" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.662121 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-c9b799fcd-wrw68" Nov 24 13:41:26 crc kubenswrapper[5039]: I1124 13:41:26.849916 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 24 13:41:27 crc kubenswrapper[5039]: W1124 13:41:27.068962 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poded94ecdc_8218_45f3_b908_7a2410b57196.slice/crio-f250f7b576ac512a61366258108b02a5141b007c70cd0f3416cc175caf55df66 WatchSource:0}: Error finding container f250f7b576ac512a61366258108b02a5141b007c70cd0f3416cc175caf55df66: Status 404 returned error can't find the container with id f250f7b576ac512a61366258108b02a5141b007c70cd0f3416cc175caf55df66 Nov 24 13:41:27 crc kubenswrapper[5039]: I1124 13:41:27.072138 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-75cf4567b8-dlwgp"] Nov 24 13:41:27 crc kubenswrapper[5039]: I1124 13:41:27.186637 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-75cf4567b8-dlwgp" event={"ID":"ed94ecdc-8218-45f3-b908-7a2410b57196","Type":"ContainerStarted","Data":"f250f7b576ac512a61366258108b02a5141b007c70cd0f3416cc175caf55df66"} Nov 24 13:41:27 crc kubenswrapper[5039]: I1124 13:41:27.256083 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-86b5f44b95-4qtzc"] Nov 24 13:41:27 crc kubenswrapper[5039]: I1124 13:41:27.348585 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-c9b799fcd-wrw68"] Nov 24 13:41:27 crc kubenswrapper[5039]: W1124 13:41:27.349025 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod291fbee7_ab14_439c_9bfc_845225d607ae.slice/crio-f21bd9b13a51c06675c9e1b18ca0329c9529bd744822f7998a4beb5704d2c174 WatchSource:0}: Error finding container f21bd9b13a51c06675c9e1b18ca0329c9529bd744822f7998a4beb5704d2c174: Status 404 returned error can't find the container with id f21bd9b13a51c06675c9e1b18ca0329c9529bd744822f7998a4beb5704d2c174 Nov 24 13:41:28 crc kubenswrapper[5039]: I1124 13:41:28.239325 5039 generic.go:334] "Generic (PLEG): container finished" podID="291fbee7-ab14-439c-9bfc-845225d607ae" containerID="fee371b0e63687cc837b27f954f35e7a2df71906ed0a176798a5fb9032f81520" exitCode=1 Nov 24 13:41:28 crc kubenswrapper[5039]: I1124 13:41:28.239769 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-c9b799fcd-wrw68" event={"ID":"291fbee7-ab14-439c-9bfc-845225d607ae","Type":"ContainerDied","Data":"fee371b0e63687cc837b27f954f35e7a2df71906ed0a176798a5fb9032f81520"} Nov 24 13:41:28 crc kubenswrapper[5039]: I1124 13:41:28.239797 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-c9b799fcd-wrw68" event={"ID":"291fbee7-ab14-439c-9bfc-845225d607ae","Type":"ContainerStarted","Data":"f21bd9b13a51c06675c9e1b18ca0329c9529bd744822f7998a4beb5704d2c174"} Nov 24 13:41:28 crc kubenswrapper[5039]: I1124 13:41:28.240855 5039 scope.go:117] "RemoveContainer" containerID="fee371b0e63687cc837b27f954f35e7a2df71906ed0a176798a5fb9032f81520" Nov 24 13:41:28 crc kubenswrapper[5039]: I1124 13:41:28.246693 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-86b5f44b95-4qtzc" event={"ID":"09227c0c-ba64-4216-8bac-a8c0f88706c3","Type":"ContainerStarted","Data":"d0356b6d2a8303b39436390c1cbbd099ab52659f247b728dbc7a2b8928e5f042"} Nov 24 13:41:28 crc kubenswrapper[5039]: I1124 13:41:28.246743 
5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-86b5f44b95-4qtzc" event={"ID":"09227c0c-ba64-4216-8bac-a8c0f88706c3","Type":"ContainerStarted","Data":"802251b6c48a07463bfd79ba34e64a026619afda0553e2cdcc723cc67c2ef2e3"} Nov 24 13:41:28 crc kubenswrapper[5039]: I1124 13:41:28.247942 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-86b5f44b95-4qtzc" Nov 24 13:41:28 crc kubenswrapper[5039]: I1124 13:41:28.252582 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-75cf4567b8-dlwgp" event={"ID":"ed94ecdc-8218-45f3-b908-7a2410b57196","Type":"ContainerStarted","Data":"1fec2c123f26a5e34dff68769cbf1db41e66f70dea0ceeb0bd524545dedee7b4"} Nov 24 13:41:28 crc kubenswrapper[5039]: I1124 13:41:28.253938 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-75cf4567b8-dlwgp" Nov 24 13:41:28 crc kubenswrapper[5039]: I1124 13:41:28.303061 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-86b5f44b95-4qtzc" podStartSLOduration=2.30303986 podStartE2EDuration="2.30303986s" podCreationTimestamp="2025-11-24 13:41:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:41:28.284496803 +0000 UTC m=+1400.723621303" watchObservedRunningTime="2025-11-24 13:41:28.30303986 +0000 UTC m=+1400.742164360" Nov 24 13:41:28 crc kubenswrapper[5039]: I1124 13:41:28.324269 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-75cf4567b8-dlwgp" podStartSLOduration=2.324250463 podStartE2EDuration="2.324250463s" podCreationTimestamp="2025-11-24 13:41:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:41:28.313077498 +0000 UTC m=+1400.752201998" watchObservedRunningTime="2025-11-24 13:41:28.324250463 +0000 UTC m=+1400.763374963" Nov 24 13:41:28 crc kubenswrapper[5039]: I1124 13:41:28.330134 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:41:28 crc kubenswrapper[5039]: I1124 13:41:28.469362 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-p8jd7"] Nov 24 13:41:28 crc kubenswrapper[5039]: I1124 13:41:28.472754 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" podUID="0840f945-89d7-40d1-b8dc-629d32793a6c" containerName="dnsmasq-dns" containerID="cri-o://5c51c7415cbccf2387a2f7b1fa7d06b5a86ecb4b13d32e014a92b97de1f692d2" gracePeriod=10 Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.253370 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-58cdd4bdc9-hd6w5"] Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.253606 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-58cdd4bdc9-hd6w5" podUID="8bb936ec-11da-428d-93ed-33745690864a" containerName="heat-api" containerID="cri-o://9b027226f77dc29632f20c9f4ae36818bb0eeb4466f11dc8c62464099d548b0d" gracePeriod=60 Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.271552 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-58cdd4bdc9-hd6w5" podUID="8bb936ec-11da-428d-93ed-33745690864a" containerName="heat-api" probeResult="failure" output="Get 
\"http://10.217.0.199:8004/healthcheck\": EOF" Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.284085 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-c9b799fcd-wrw68" event={"ID":"291fbee7-ab14-439c-9bfc-845225d607ae","Type":"ContainerStarted","Data":"251a8685a592e17b193cd81ef17ae5bc48f0554a7ee85b6b2c4faa3f1e0e1804"} Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.284569 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-c9b799fcd-wrw68" Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.298199 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-554bdcfd44-79lq6"] Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.298274 5039 generic.go:334] "Generic (PLEG): container finished" podID="09227c0c-ba64-4216-8bac-a8c0f88706c3" containerID="d0356b6d2a8303b39436390c1cbbd099ab52659f247b728dbc7a2b8928e5f042" exitCode=1 Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.298335 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-86b5f44b95-4qtzc" event={"ID":"09227c0c-ba64-4216-8bac-a8c0f88706c3","Type":"ContainerDied","Data":"d0356b6d2a8303b39436390c1cbbd099ab52659f247b728dbc7a2b8928e5f042"} Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.298409 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-554bdcfd44-79lq6" podUID="fdafa1d4-1e80-420d-a2a1-4017bd9144be" containerName="heat-cfnapi" containerID="cri-o://fb38b8bf941b98e0d62d0bb1454a7e7f94676c78c2fe9268917699ed00e067d1" gracePeriod=60 Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.298989 5039 scope.go:117] "RemoveContainer" containerID="d0356b6d2a8303b39436390c1cbbd099ab52659f247b728dbc7a2b8928e5f042" Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.307454 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-78c587fb4d-fl6qr"] Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.308788 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-78c587fb4d-fl6qr" Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.309096 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-554bdcfd44-79lq6" podUID="fdafa1d4-1e80-420d-a2a1-4017bd9144be" containerName="heat-cfnapi" probeResult="failure" output="Get \"http://10.217.0.197:8000/healthcheck\": EOF" Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.309213 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/heat-cfnapi-554bdcfd44-79lq6" podUID="fdafa1d4-1e80-420d-a2a1-4017bd9144be" containerName="heat-cfnapi" probeResult="failure" output="Get \"http://10.217.0.197:8000/healthcheck\": EOF" Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.310809 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.313350 5039 generic.go:334] "Generic (PLEG): container finished" podID="0840f945-89d7-40d1-b8dc-629d32793a6c" containerID="5c51c7415cbccf2387a2f7b1fa7d06b5a86ecb4b13d32e014a92b97de1f692d2" exitCode=0 Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.314483 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" event={"ID":"0840f945-89d7-40d1-b8dc-629d32793a6c","Type":"ContainerDied","Data":"5c51c7415cbccf2387a2f7b1fa7d06b5a86ecb4b13d32e014a92b97de1f692d2"} Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.314887 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.354346 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/heat-api-58cdd4bdc9-hd6w5" podUID="8bb936ec-11da-428d-93ed-33745690864a" containerName="heat-api" probeResult="failure" output="Get \"http://10.217.0.199:8004/healthcheck\": EOF" Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.359818 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-config-data\") pod \"heat-api-78c587fb4d-fl6qr\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " pod="openstack/heat-api-78c587fb4d-fl6qr" Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.363552 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-public-tls-certs\") pod \"heat-api-78c587fb4d-fl6qr\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " pod="openstack/heat-api-78c587fb4d-fl6qr" Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.363666 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlwdp\" (UniqueName: \"kubernetes.io/projected/d505257c-0bc2-427b-8f9a-e5333460f461-kube-api-access-dlwdp\") pod \"heat-api-78c587fb4d-fl6qr\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " pod="openstack/heat-api-78c587fb4d-fl6qr" Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.363980 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-config-data-custom\") pod \"heat-api-78c587fb4d-fl6qr\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " pod="openstack/heat-api-78c587fb4d-fl6qr" Nov 24 
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.364024 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-combined-ca-bundle\") pod \"heat-api-78c587fb4d-fl6qr\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " pod="openstack/heat-api-78c587fb4d-fl6qr"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.364060 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-internal-tls-certs\") pod \"heat-api-78c587fb4d-fl6qr\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " pod="openstack/heat-api-78c587fb4d-fl6qr"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.364210 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-78c587fb4d-fl6qr"]
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.392601 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-7f45f46b76-fckwv"]
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.394225 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7f45f46b76-fckwv"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.403860 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.404073 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.404251 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-c9b799fcd-wrw68" podStartSLOduration=3.40422801 podStartE2EDuration="3.40422801s" podCreationTimestamp="2025-11-24 13:41:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:41:29.341552364 +0000 UTC m=+1401.780676874" watchObservedRunningTime="2025-11-24 13:41:29.40422801 +0000 UTC m=+1401.843352510"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.440110 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7f45f46b76-fckwv"]
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.465714 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-combined-ca-bundle\") pod \"heat-cfnapi-7f45f46b76-fckwv\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " pod="openstack/heat-cfnapi-7f45f46b76-fckwv"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.465766 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlwdp\" (UniqueName: \"kubernetes.io/projected/d505257c-0bc2-427b-8f9a-e5333460f461-kube-api-access-dlwdp\") pod \"heat-api-78c587fb4d-fl6qr\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " pod="openstack/heat-api-78c587fb4d-fl6qr"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.465838 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-config-data-custom\") pod \"heat-api-78c587fb4d-fl6qr\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " pod="openstack/heat-api-78c587fb4d-fl6qr"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.465881 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-combined-ca-bundle\") pod \"heat-api-78c587fb4d-fl6qr\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " pod="openstack/heat-api-78c587fb4d-fl6qr"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.465906 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bchxx\" (UniqueName: \"kubernetes.io/projected/88f5edac-dd13-4a09-97a0-60f263e60f23-kube-api-access-bchxx\") pod \"heat-cfnapi-7f45f46b76-fckwv\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " pod="openstack/heat-cfnapi-7f45f46b76-fckwv"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.465930 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-internal-tls-certs\") pod \"heat-api-78c587fb4d-fl6qr\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " pod="openstack/heat-api-78c587fb4d-fl6qr"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.465962 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-config-data\") pod \"heat-api-78c587fb4d-fl6qr\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " pod="openstack/heat-api-78c587fb4d-fl6qr"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.465996 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-public-tls-certs\") pod \"heat-cfnapi-7f45f46b76-fckwv\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " pod="openstack/heat-cfnapi-7f45f46b76-fckwv"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.466062 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-internal-tls-certs\") pod \"heat-cfnapi-7f45f46b76-fckwv\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " pod="openstack/heat-cfnapi-7f45f46b76-fckwv"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.466084 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-config-data\") pod \"heat-cfnapi-7f45f46b76-fckwv\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " pod="openstack/heat-cfnapi-7f45f46b76-fckwv"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.466187 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-public-tls-certs\") pod \"heat-api-78c587fb4d-fl6qr\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " pod="openstack/heat-api-78c587fb4d-fl6qr"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.466209 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-config-data-custom\") pod \"heat-cfnapi-7f45f46b76-fckwv\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " pod="openstack/heat-cfnapi-7f45f46b76-fckwv"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.474276 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-config-data-custom\") pod \"heat-api-78c587fb4d-fl6qr\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " pod="openstack/heat-api-78c587fb4d-fl6qr"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.477670 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-config-data\") pod \"heat-api-78c587fb4d-fl6qr\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " pod="openstack/heat-api-78c587fb4d-fl6qr"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.483286 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-combined-ca-bundle\") pod \"heat-api-78c587fb4d-fl6qr\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " pod="openstack/heat-api-78c587fb4d-fl6qr"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.487855 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-internal-tls-certs\") pod \"heat-api-78c587fb4d-fl6qr\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " pod="openstack/heat-api-78c587fb4d-fl6qr"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.494112 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-public-tls-certs\") pod \"heat-api-78c587fb4d-fl6qr\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " pod="openstack/heat-api-78c587fb4d-fl6qr"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.502129 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlwdp\" (UniqueName: \"kubernetes.io/projected/d505257c-0bc2-427b-8f9a-e5333460f461-kube-api-access-dlwdp\") pod \"heat-api-78c587fb4d-fl6qr\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " pod="openstack/heat-api-78c587fb4d-fl6qr"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.567718 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bchxx\" (UniqueName: \"kubernetes.io/projected/88f5edac-dd13-4a09-97a0-60f263e60f23-kube-api-access-bchxx\") pod \"heat-cfnapi-7f45f46b76-fckwv\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " pod="openstack/heat-cfnapi-7f45f46b76-fckwv"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.567806 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-public-tls-certs\") pod \"heat-cfnapi-7f45f46b76-fckwv\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " pod="openstack/heat-cfnapi-7f45f46b76-fckwv"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.567856 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-internal-tls-certs\") pod \"heat-cfnapi-7f45f46b76-fckwv\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " pod="openstack/heat-cfnapi-7f45f46b76-fckwv"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.567877 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-config-data\") pod \"heat-cfnapi-7f45f46b76-fckwv\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " pod="openstack/heat-cfnapi-7f45f46b76-fckwv"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.567956 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-config-data-custom\") pod \"heat-cfnapi-7f45f46b76-fckwv\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " pod="openstack/heat-cfnapi-7f45f46b76-fckwv"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.567975 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-combined-ca-bundle\") pod \"heat-cfnapi-7f45f46b76-fckwv\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " pod="openstack/heat-cfnapi-7f45f46b76-fckwv"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.574221 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-config-data\") pod \"heat-cfnapi-7f45f46b76-fckwv\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " pod="openstack/heat-cfnapi-7f45f46b76-fckwv"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.577322 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-combined-ca-bundle\") pod \"heat-cfnapi-7f45f46b76-fckwv\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " pod="openstack/heat-cfnapi-7f45f46b76-fckwv"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.578194 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-config-data-custom\") pod \"heat-cfnapi-7f45f46b76-fckwv\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " pod="openstack/heat-cfnapi-7f45f46b76-fckwv"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.582023 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-public-tls-certs\") pod \"heat-cfnapi-7f45f46b76-fckwv\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " pod="openstack/heat-cfnapi-7f45f46b76-fckwv"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.586272 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bchxx\" (UniqueName: \"kubernetes.io/projected/88f5edac-dd13-4a09-97a0-60f263e60f23-kube-api-access-bchxx\") pod \"heat-cfnapi-7f45f46b76-fckwv\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " pod="openstack/heat-cfnapi-7f45f46b76-fckwv"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.587686 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-internal-tls-certs\") pod \"heat-cfnapi-7f45f46b76-fckwv\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " pod="openstack/heat-cfnapi-7f45f46b76-fckwv"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.720129 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-78c587fb4d-fl6qr"
Nov 24 13:41:29 crc kubenswrapper[5039]: I1124 13:41:29.737745 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7f45f46b76-fckwv"
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.133012 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" podUID="0840f945-89d7-40d1-b8dc-629d32793a6c" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.189:5353: connect: connection refused"
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.152864 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-5b66587b55-thzjl"]
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.154872 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-5b66587b55-thzjl"
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.157061 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc"
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.157120 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.157331 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc"
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.169299 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-5b66587b55-thzjl"]
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.288549 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5pwsg\" (UniqueName: \"kubernetes.io/projected/bd1bf6a5-309b-4960-8f37-34b006db3599-kube-api-access-5pwsg\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl"
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.288614 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd1bf6a5-309b-4960-8f37-34b006db3599-run-httpd\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl"
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.288640 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/bd1bf6a5-309b-4960-8f37-34b006db3599-etc-swift\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl"
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.288653 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd1bf6a5-309b-4960-8f37-34b006db3599-log-httpd\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl"
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.288729 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd1bf6a5-309b-4960-8f37-34b006db3599-internal-tls-certs\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl"
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.288754 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd1bf6a5-309b-4960-8f37-34b006db3599-combined-ca-bundle\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl"
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.288768 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd1bf6a5-309b-4960-8f37-34b006db3599-public-tls-certs\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl"
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.288812 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd1bf6a5-309b-4960-8f37-34b006db3599-config-data\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl"
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.345169 5039 generic.go:334] "Generic (PLEG): container finished" podID="291fbee7-ab14-439c-9bfc-845225d607ae" containerID="251a8685a592e17b193cd81ef17ae5bc48f0554a7ee85b6b2c4faa3f1e0e1804" exitCode=1
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.345216 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-c9b799fcd-wrw68" event={"ID":"291fbee7-ab14-439c-9bfc-845225d607ae","Type":"ContainerDied","Data":"251a8685a592e17b193cd81ef17ae5bc48f0554a7ee85b6b2c4faa3f1e0e1804"}
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.345265 5039 scope.go:117] "RemoveContainer" containerID="fee371b0e63687cc837b27f954f35e7a2df71906ed0a176798a5fb9032f81520"
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.346206 5039 scope.go:117] "RemoveContainer" containerID="251a8685a592e17b193cd81ef17ae5bc48f0554a7ee85b6b2c4faa3f1e0e1804"
Nov 24 13:41:30 crc kubenswrapper[5039]: E1124 13:41:30.346590 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-c9b799fcd-wrw68_openstack(291fbee7-ab14-439c-9bfc-845225d607ae)\"" pod="openstack/heat-cfnapi-c9b799fcd-wrw68" podUID="291fbee7-ab14-439c-9bfc-845225d607ae"
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.391121 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd1bf6a5-309b-4960-8f37-34b006db3599-run-httpd\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl"
Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.391173 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/bd1bf6a5-309b-4960-8f37-34b006db3599-etc-swift\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl"
started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd1bf6a5-309b-4960-8f37-34b006db3599-log-httpd\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl" Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.391269 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd1bf6a5-309b-4960-8f37-34b006db3599-internal-tls-certs\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl" Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.391294 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd1bf6a5-309b-4960-8f37-34b006db3599-combined-ca-bundle\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl" Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.391308 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd1bf6a5-309b-4960-8f37-34b006db3599-public-tls-certs\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl" Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.391355 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd1bf6a5-309b-4960-8f37-34b006db3599-config-data\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl" Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.391446 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5pwsg\" (UniqueName: \"kubernetes.io/projected/bd1bf6a5-309b-4960-8f37-34b006db3599-kube-api-access-5pwsg\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl" Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.391762 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd1bf6a5-309b-4960-8f37-34b006db3599-run-httpd\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl" Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.392679 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd1bf6a5-309b-4960-8f37-34b006db3599-log-httpd\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl" Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.403426 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd1bf6a5-309b-4960-8f37-34b006db3599-public-tls-certs\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl" Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.403525 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/bd1bf6a5-309b-4960-8f37-34b006db3599-combined-ca-bundle\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl" Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.403634 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd1bf6a5-309b-4960-8f37-34b006db3599-config-data\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl" Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.404640 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/bd1bf6a5-309b-4960-8f37-34b006db3599-etc-swift\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl" Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.408173 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd1bf6a5-309b-4960-8f37-34b006db3599-internal-tls-certs\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl" Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.422261 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5pwsg\" (UniqueName: \"kubernetes.io/projected/bd1bf6a5-309b-4960-8f37-34b006db3599-kube-api-access-5pwsg\") pod \"swift-proxy-5b66587b55-thzjl\" (UID: \"bd1bf6a5-309b-4960-8f37-34b006db3599\") " pod="openstack/swift-proxy-5b66587b55-thzjl" Nov 24 13:41:30 crc kubenswrapper[5039]: I1124 13:41:30.530899 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-5b66587b55-thzjl" Nov 24 13:41:31 crc kubenswrapper[5039]: I1124 13:41:31.354397 5039 scope.go:117] "RemoveContainer" containerID="251a8685a592e17b193cd81ef17ae5bc48f0554a7ee85b6b2c4faa3f1e0e1804" Nov 24 13:41:31 crc kubenswrapper[5039]: E1124 13:41:31.354944 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-c9b799fcd-wrw68_openstack(291fbee7-ab14-439c-9bfc-845225d607ae)\"" pod="openstack/heat-cfnapi-c9b799fcd-wrw68" podUID="291fbee7-ab14-439c-9bfc-845225d607ae" Nov 24 13:41:31 crc kubenswrapper[5039]: I1124 13:41:31.530850 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-86b5f44b95-4qtzc" Nov 24 13:41:31 crc kubenswrapper[5039]: I1124 13:41:31.577965 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:41:31 crc kubenswrapper[5039]: I1124 13:41:31.578278 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bb35c10f-5f1f-4175-9174-4696bada484a" containerName="ceilometer-central-agent" containerID="cri-o://7f876b92196080d7a8a1b72cb590a7aa6142c21222f40983a1db923eeee0aec2" gracePeriod=30 Nov 24 13:41:31 crc kubenswrapper[5039]: I1124 13:41:31.578807 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bb35c10f-5f1f-4175-9174-4696bada484a" containerName="proxy-httpd" containerID="cri-o://bfc41be41545f659adb793d89f7a38a9701c37ae9cf9302837c485713e46778b" gracePeriod=30 Nov 24 13:41:31 crc kubenswrapper[5039]: I1124 13:41:31.578870 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bb35c10f-5f1f-4175-9174-4696bada484a" containerName="sg-core" containerID="cri-o://034a273616dc694275fdcce5cfb64e6ed269c5c01da997861bc702be0bbaf3ad" gracePeriod=30 Nov 24 13:41:31 crc kubenswrapper[5039]: I1124 13:41:31.578917 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bb35c10f-5f1f-4175-9174-4696bada484a" containerName="ceilometer-notification-agent" containerID="cri-o://cad996b1bb6a4808da9181a9f0062555a1541578283f9207c4b33b5add39ff39" gracePeriod=30 Nov 24 13:41:31 crc kubenswrapper[5039]: I1124 13:41:31.662632 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-c9b799fcd-wrw68" Nov 24 13:41:31 crc kubenswrapper[5039]: E1124 13:41:31.845700 5039 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbb35c10f_5f1f_4175_9174_4696bada484a.slice/crio-034a273616dc694275fdcce5cfb64e6ed269c5c01da997861bc702be0bbaf3ad.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbb35c10f_5f1f_4175_9174_4696bada484a.slice/crio-bfc41be41545f659adb793d89f7a38a9701c37ae9cf9302837c485713e46778b.scope\": RecentStats: unable to find data in memory cache]" Nov 24 13:41:32 crc kubenswrapper[5039]: I1124 13:41:32.368486 5039 generic.go:334] "Generic (PLEG): container finished" podID="bb35c10f-5f1f-4175-9174-4696bada484a" containerID="bfc41be41545f659adb793d89f7a38a9701c37ae9cf9302837c485713e46778b" exitCode=0 Nov 24 13:41:32 crc kubenswrapper[5039]: I1124 
Nov 24 13:41:32 crc kubenswrapper[5039]: I1124 13:41:32.368564 5039 generic.go:334] "Generic (PLEG): container finished" podID="bb35c10f-5f1f-4175-9174-4696bada484a" containerID="034a273616dc694275fdcce5cfb64e6ed269c5c01da997861bc702be0bbaf3ad" exitCode=2
Nov 24 13:41:32 crc kubenswrapper[5039]: I1124 13:41:32.368573 5039 generic.go:334] "Generic (PLEG): container finished" podID="bb35c10f-5f1f-4175-9174-4696bada484a" containerID="7f876b92196080d7a8a1b72cb590a7aa6142c21222f40983a1db923eeee0aec2" exitCode=0
Nov 24 13:41:32 crc kubenswrapper[5039]: I1124 13:41:32.368593 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb35c10f-5f1f-4175-9174-4696bada484a","Type":"ContainerDied","Data":"bfc41be41545f659adb793d89f7a38a9701c37ae9cf9302837c485713e46778b"}
Nov 24 13:41:32 crc kubenswrapper[5039]: I1124 13:41:32.368642 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb35c10f-5f1f-4175-9174-4696bada484a","Type":"ContainerDied","Data":"034a273616dc694275fdcce5cfb64e6ed269c5c01da997861bc702be0bbaf3ad"}
Nov 24 13:41:32 crc kubenswrapper[5039]: I1124 13:41:32.368657 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb35c10f-5f1f-4175-9174-4696bada484a","Type":"ContainerDied","Data":"7f876b92196080d7a8a1b72cb590a7aa6142c21222f40983a1db923eeee0aec2"}
Nov 24 13:41:32 crc kubenswrapper[5039]: I1124 13:41:32.369298 5039 scope.go:117] "RemoveContainer" containerID="251a8685a592e17b193cd81ef17ae5bc48f0554a7ee85b6b2c4faa3f1e0e1804"
Nov 24 13:41:32 crc kubenswrapper[5039]: E1124 13:41:32.369561 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-c9b799fcd-wrw68_openstack(291fbee7-ab14-439c-9bfc-845225d607ae)\"" pod="openstack/heat-cfnapi-c9b799fcd-wrw68" podUID="291fbee7-ab14-439c-9bfc-845225d607ae"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.210211 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-bdq8h"]
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.211638 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-bdq8h"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.222530 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-bdq8h"]
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.306323 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-7zj9p"]
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.315698 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-7zj9p"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.357626 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-7zj9p"]
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.366686 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a73195a6-8449-41fa-ad7c-5ce086c264ec-operator-scripts\") pod \"nova-api-db-create-bdq8h\" (UID: \"a73195a6-8449-41fa-ad7c-5ce086c264ec\") " pod="openstack/nova-api-db-create-bdq8h"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.366803 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mx2vv\" (UniqueName: \"kubernetes.io/projected/a73195a6-8449-41fa-ad7c-5ce086c264ec-kube-api-access-mx2vv\") pod \"nova-api-db-create-bdq8h\" (UID: \"a73195a6-8449-41fa-ad7c-5ce086c264ec\") " pod="openstack/nova-api-db-create-bdq8h"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.366914 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a41f29ac-696c-4aa9-aed7-b8959e15fa52-operator-scripts\") pod \"nova-cell0-db-create-7zj9p\" (UID: \"a41f29ac-696c-4aa9-aed7-b8959e15fa52\") " pod="openstack/nova-cell0-db-create-7zj9p"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.367253 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpvrw\" (UniqueName: \"kubernetes.io/projected/a41f29ac-696c-4aa9-aed7-b8959e15fa52-kube-api-access-wpvrw\") pod \"nova-cell0-db-create-7zj9p\" (UID: \"a41f29ac-696c-4aa9-aed7-b8959e15fa52\") " pod="openstack/nova-cell0-db-create-7zj9p"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.434058 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-2540-account-create-g8rgr"]
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.435447 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2540-account-create-g8rgr"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.439748 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.469636 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-5blmb"]
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.471046 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-5blmb"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.478288 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b1111a8-04ac-478b-b1bf-557246566f05-operator-scripts\") pod \"nova-api-2540-account-create-g8rgr\" (UID: \"4b1111a8-04ac-478b-b1bf-557246566f05\") " pod="openstack/nova-api-2540-account-create-g8rgr"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.479605 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzfvj\" (UniqueName: \"kubernetes.io/projected/4b1111a8-04ac-478b-b1bf-557246566f05-kube-api-access-dzfvj\") pod \"nova-api-2540-account-create-g8rgr\" (UID: \"4b1111a8-04ac-478b-b1bf-557246566f05\") " pod="openstack/nova-api-2540-account-create-g8rgr"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.479936 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a73195a6-8449-41fa-ad7c-5ce086c264ec-operator-scripts\") pod \"nova-api-db-create-bdq8h\" (UID: \"a73195a6-8449-41fa-ad7c-5ce086c264ec\") " pod="openstack/nova-api-db-create-bdq8h"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.480792 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mx2vv\" (UniqueName: \"kubernetes.io/projected/a73195a6-8449-41fa-ad7c-5ce086c264ec-kube-api-access-mx2vv\") pod \"nova-api-db-create-bdq8h\" (UID: \"a73195a6-8449-41fa-ad7c-5ce086c264ec\") " pod="openstack/nova-api-db-create-bdq8h"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.480917 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a41f29ac-696c-4aa9-aed7-b8959e15fa52-operator-scripts\") pod \"nova-cell0-db-create-7zj9p\" (UID: \"a41f29ac-696c-4aa9-aed7-b8959e15fa52\") " pod="openstack/nova-cell0-db-create-7zj9p"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.483599 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpvrw\" (UniqueName: \"kubernetes.io/projected/a41f29ac-696c-4aa9-aed7-b8959e15fa52-kube-api-access-wpvrw\") pod \"nova-cell0-db-create-7zj9p\" (UID: \"a41f29ac-696c-4aa9-aed7-b8959e15fa52\") " pod="openstack/nova-cell0-db-create-7zj9p"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.483360 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a41f29ac-696c-4aa9-aed7-b8959e15fa52-operator-scripts\") pod \"nova-cell0-db-create-7zj9p\" (UID: \"a41f29ac-696c-4aa9-aed7-b8959e15fa52\") " pod="openstack/nova-cell0-db-create-7zj9p"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.480713 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a73195a6-8449-41fa-ad7c-5ce086c264ec-operator-scripts\") pod \"nova-api-db-create-bdq8h\" (UID: \"a73195a6-8449-41fa-ad7c-5ce086c264ec\") " pod="openstack/nova-api-db-create-bdq8h"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.518865 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-2540-account-create-g8rgr"]
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.527629 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-5blmb"]
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.545239 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpvrw\" (UniqueName: \"kubernetes.io/projected/a41f29ac-696c-4aa9-aed7-b8959e15fa52-kube-api-access-wpvrw\") pod \"nova-cell0-db-create-7zj9p\" (UID: \"a41f29ac-696c-4aa9-aed7-b8959e15fa52\") " pod="openstack/nova-cell0-db-create-7zj9p"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.586313 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b1111a8-04ac-478b-b1bf-557246566f05-operator-scripts\") pod \"nova-api-2540-account-create-g8rgr\" (UID: \"4b1111a8-04ac-478b-b1bf-557246566f05\") " pod="openstack/nova-api-2540-account-create-g8rgr"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.586359 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzfvj\" (UniqueName: \"kubernetes.io/projected/4b1111a8-04ac-478b-b1bf-557246566f05-kube-api-access-dzfvj\") pod \"nova-api-2540-account-create-g8rgr\" (UID: \"4b1111a8-04ac-478b-b1bf-557246566f05\") " pod="openstack/nova-api-2540-account-create-g8rgr"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.586385 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nj44z\" (UniqueName: \"kubernetes.io/projected/11fd95de-91de-400f-a931-ca7339de0a76-kube-api-access-nj44z\") pod \"nova-cell1-db-create-5blmb\" (UID: \"11fd95de-91de-400f-a931-ca7339de0a76\") " pod="openstack/nova-cell1-db-create-5blmb"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.586523 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11fd95de-91de-400f-a931-ca7339de0a76-operator-scripts\") pod \"nova-cell1-db-create-5blmb\" (UID: \"11fd95de-91de-400f-a931-ca7339de0a76\") " pod="openstack/nova-cell1-db-create-5blmb"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.587283 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b1111a8-04ac-478b-b1bf-557246566f05-operator-scripts\") pod \"nova-api-2540-account-create-g8rgr\" (UID: \"4b1111a8-04ac-478b-b1bf-557246566f05\") " pod="openstack/nova-api-2540-account-create-g8rgr"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.587366 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mx2vv\" (UniqueName: \"kubernetes.io/projected/a73195a6-8449-41fa-ad7c-5ce086c264ec-kube-api-access-mx2vv\") pod \"nova-api-db-create-bdq8h\" (UID: \"a73195a6-8449-41fa-ad7c-5ce086c264ec\") " pod="openstack/nova-api-db-create-bdq8h"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.619847 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzfvj\" (UniqueName: \"kubernetes.io/projected/4b1111a8-04ac-478b-b1bf-557246566f05-kube-api-access-dzfvj\") pod \"nova-api-2540-account-create-g8rgr\" (UID: \"4b1111a8-04ac-478b-b1bf-557246566f05\") " pod="openstack/nova-api-2540-account-create-g8rgr"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.644779 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-ba86-account-create-zsw6z"]
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.646829 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ba86-account-create-zsw6z"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.648843 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.670004 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-7zj9p"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.677298 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-ba86-account-create-zsw6z"]
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.690702 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11fd95de-91de-400f-a931-ca7339de0a76-operator-scripts\") pod \"nova-cell1-db-create-5blmb\" (UID: \"11fd95de-91de-400f-a931-ca7339de0a76\") " pod="openstack/nova-cell1-db-create-5blmb"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.690798 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nj44z\" (UniqueName: \"kubernetes.io/projected/11fd95de-91de-400f-a931-ca7339de0a76-kube-api-access-nj44z\") pod \"nova-cell1-db-create-5blmb\" (UID: \"11fd95de-91de-400f-a931-ca7339de0a76\") " pod="openstack/nova-cell1-db-create-5blmb"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.690853 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a18b1393-b27b-42f7-938d-cf3321f376d6-operator-scripts\") pod \"nova-cell0-ba86-account-create-zsw6z\" (UID: \"a18b1393-b27b-42f7-938d-cf3321f376d6\") " pod="openstack/nova-cell0-ba86-account-create-zsw6z"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.690872 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j522v\" (UniqueName: \"kubernetes.io/projected/a18b1393-b27b-42f7-938d-cf3321f376d6-kube-api-access-j522v\") pod \"nova-cell0-ba86-account-create-zsw6z\" (UID: \"a18b1393-b27b-42f7-938d-cf3321f376d6\") " pod="openstack/nova-cell0-ba86-account-create-zsw6z"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.691459 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11fd95de-91de-400f-a931-ca7339de0a76-operator-scripts\") pod \"nova-cell1-db-create-5blmb\" (UID: \"11fd95de-91de-400f-a931-ca7339de0a76\") " pod="openstack/nova-cell1-db-create-5blmb"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.740395 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nj44z\" (UniqueName: \"kubernetes.io/projected/11fd95de-91de-400f-a931-ca7339de0a76-kube-api-access-nj44z\") pod \"nova-cell1-db-create-5blmb\" (UID: \"11fd95de-91de-400f-a931-ca7339de0a76\") " pod="openstack/nova-cell1-db-create-5blmb"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.770999 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2540-account-create-g8rgr"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.795062 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a18b1393-b27b-42f7-938d-cf3321f376d6-operator-scripts\") pod \"nova-cell0-ba86-account-create-zsw6z\" (UID: \"a18b1393-b27b-42f7-938d-cf3321f376d6\") " pod="openstack/nova-cell0-ba86-account-create-zsw6z"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.795120 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j522v\" (UniqueName: \"kubernetes.io/projected/a18b1393-b27b-42f7-938d-cf3321f376d6-kube-api-access-j522v\") pod \"nova-cell0-ba86-account-create-zsw6z\" (UID: \"a18b1393-b27b-42f7-938d-cf3321f376d6\") " pod="openstack/nova-cell0-ba86-account-create-zsw6z"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.796269 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a18b1393-b27b-42f7-938d-cf3321f376d6-operator-scripts\") pod \"nova-cell0-ba86-account-create-zsw6z\" (UID: \"a18b1393-b27b-42f7-938d-cf3321f376d6\") " pod="openstack/nova-cell0-ba86-account-create-zsw6z"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.799522 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-5blmb"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.828185 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j522v\" (UniqueName: \"kubernetes.io/projected/a18b1393-b27b-42f7-938d-cf3321f376d6-kube-api-access-j522v\") pod \"nova-cell0-ba86-account-create-zsw6z\" (UID: \"a18b1393-b27b-42f7-938d-cf3321f376d6\") " pod="openstack/nova-cell0-ba86-account-create-zsw6z"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.850070 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-bdq8h"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.864543 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-69e6-account-create-5kzxp"]
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.865984 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-69e6-account-create-5kzxp"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.876048 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret"
Nov 24 13:41:33 crc kubenswrapper[5039]: I1124 13:41:33.883019 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-69e6-account-create-5kzxp"]
Nov 24 13:41:34 crc kubenswrapper[5039]: I1124 13:41:34.000650 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mz56g\" (UniqueName: \"kubernetes.io/projected/6721127c-79a1-4fd5-98db-0e99ff78de0e-kube-api-access-mz56g\") pod \"nova-cell1-69e6-account-create-5kzxp\" (UID: \"6721127c-79a1-4fd5-98db-0e99ff78de0e\") " pod="openstack/nova-cell1-69e6-account-create-5kzxp"
Nov 24 13:41:34 crc kubenswrapper[5039]: I1124 13:41:34.000856 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6721127c-79a1-4fd5-98db-0e99ff78de0e-operator-scripts\") pod \"nova-cell1-69e6-account-create-5kzxp\" (UID: \"6721127c-79a1-4fd5-98db-0e99ff78de0e\") " pod="openstack/nova-cell1-69e6-account-create-5kzxp"
Nov 24 13:41:34 crc kubenswrapper[5039]: I1124 13:41:34.028666 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ba86-account-create-zsw6z"
Nov 24 13:41:34 crc kubenswrapper[5039]: I1124 13:41:34.103050 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6721127c-79a1-4fd5-98db-0e99ff78de0e-operator-scripts\") pod \"nova-cell1-69e6-account-create-5kzxp\" (UID: \"6721127c-79a1-4fd5-98db-0e99ff78de0e\") " pod="openstack/nova-cell1-69e6-account-create-5kzxp"
Nov 24 13:41:34 crc kubenswrapper[5039]: I1124 13:41:34.103109 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mz56g\" (UniqueName: \"kubernetes.io/projected/6721127c-79a1-4fd5-98db-0e99ff78de0e-kube-api-access-mz56g\") pod \"nova-cell1-69e6-account-create-5kzxp\" (UID: \"6721127c-79a1-4fd5-98db-0e99ff78de0e\") " pod="openstack/nova-cell1-69e6-account-create-5kzxp"
Nov 24 13:41:34 crc kubenswrapper[5039]: I1124 13:41:34.103854 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6721127c-79a1-4fd5-98db-0e99ff78de0e-operator-scripts\") pod \"nova-cell1-69e6-account-create-5kzxp\" (UID: \"6721127c-79a1-4fd5-98db-0e99ff78de0e\") " pod="openstack/nova-cell1-69e6-account-create-5kzxp"
Nov 24 13:41:34 crc kubenswrapper[5039]: I1124 13:41:34.125224 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mz56g\" (UniqueName: \"kubernetes.io/projected/6721127c-79a1-4fd5-98db-0e99ff78de0e-kube-api-access-mz56g\") pod \"nova-cell1-69e6-account-create-5kzxp\" (UID: \"6721127c-79a1-4fd5-98db-0e99ff78de0e\") " pod="openstack/nova-cell1-69e6-account-create-5kzxp"
Nov 24 13:41:34 crc kubenswrapper[5039]: I1124 13:41:34.234617 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-69e6-account-create-5kzxp"
Nov 24 13:41:34 crc kubenswrapper[5039]: I1124 13:41:34.679968 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-58cdd4bdc9-hd6w5" podUID="8bb936ec-11da-428d-93ed-33745690864a" containerName="heat-api" probeResult="failure" output="Get \"http://10.217.0.199:8004/healthcheck\": read tcp 10.217.0.2:53396->10.217.0.199:8004: read: connection reset by peer"
Nov 24 13:41:34 crc kubenswrapper[5039]: I1124 13:41:34.681055 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-58cdd4bdc9-hd6w5" podUID="8bb936ec-11da-428d-93ed-33745690864a" containerName="heat-api" probeResult="failure" output="Get \"http://10.217.0.199:8004/healthcheck\": dial tcp 10.217.0.199:8004: connect: connection refused"
Nov 24 13:41:34 crc kubenswrapper[5039]: I1124 13:41:34.775356 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-554bdcfd44-79lq6" podUID="fdafa1d4-1e80-420d-a2a1-4017bd9144be" containerName="heat-cfnapi" probeResult="failure" output="Get \"http://10.217.0.197:8000/healthcheck\": read tcp 10.217.0.2:40200->10.217.0.197:8000: read: connection reset by peer"
Nov 24 13:41:34 crc kubenswrapper[5039]: I1124 13:41:34.775875 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-554bdcfd44-79lq6" podUID="fdafa1d4-1e80-420d-a2a1-4017bd9144be" containerName="heat-cfnapi" probeResult="failure" output="Get \"http://10.217.0.197:8000/healthcheck\": dial tcp 10.217.0.197:8000: connect: connection refused"
Nov 24 13:41:35 crc kubenswrapper[5039]: I1124 13:41:35.107936 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7bdd5bd5df-sqgnq"
Nov 24 13:41:35 crc kubenswrapper[5039]: I1124 13:41:35.133528 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" podUID="0840f945-89d7-40d1-b8dc-629d32793a6c" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.189:5353: connect: connection refused"
Nov 24 13:41:35 crc kubenswrapper[5039]: I1124 13:41:35.166180 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-c4b94795b-c6c2f"]
Nov 24 13:41:35 crc kubenswrapper[5039]: I1124 13:41:35.166468 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-c4b94795b-c6c2f" podUID="7fe0bf4f-b6f8-48c0-b772-587a715e6c27" containerName="neutron-api" containerID="cri-o://8437e26cb6893af74d085f476e072984c99df391d9ee19a4d901cea208fdde73" gracePeriod=30
Nov 24 13:41:35 crc kubenswrapper[5039]: I1124 13:41:35.166643 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-c4b94795b-c6c2f" podUID="7fe0bf4f-b6f8-48c0-b772-587a715e6c27" containerName="neutron-httpd" containerID="cri-o://eb2f7267049802178c3dd0c7578422d8ddad0e06220f23d0572624afbd97cc53" gracePeriod=30
Nov 24 13:41:35 crc kubenswrapper[5039]: I1124 13:41:35.432402 5039 generic.go:334] "Generic (PLEG): container finished" podID="bb35c10f-5f1f-4175-9174-4696bada484a" containerID="cad996b1bb6a4808da9181a9f0062555a1541578283f9207c4b33b5add39ff39" exitCode=0
Nov 24 13:41:35 crc kubenswrapper[5039]: I1124 13:41:35.432448 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb35c10f-5f1f-4175-9174-4696bada484a","Type":"ContainerDied","Data":"cad996b1bb6a4808da9181a9f0062555a1541578283f9207c4b33b5add39ff39"}
Nov 24 13:41:35 crc kubenswrapper[5039]: I1124 13:41:35.435395 5039 generic.go:334] "Generic (PLEG): container finished" podID="fdafa1d4-1e80-420d-a2a1-4017bd9144be" containerID="fb38b8bf941b98e0d62d0bb1454a7e7f94676c78c2fe9268917699ed00e067d1" exitCode=0
Nov 24 13:41:35 crc kubenswrapper[5039]: I1124 13:41:35.435475 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-554bdcfd44-79lq6" event={"ID":"fdafa1d4-1e80-420d-a2a1-4017bd9144be","Type":"ContainerDied","Data":"fb38b8bf941b98e0d62d0bb1454a7e7f94676c78c2fe9268917699ed00e067d1"}
Nov 24 13:41:35 crc kubenswrapper[5039]: I1124 13:41:35.437768 5039 generic.go:334] "Generic (PLEG): container finished" podID="8bb936ec-11da-428d-93ed-33745690864a" containerID="9b027226f77dc29632f20c9f4ae36818bb0eeb4466f11dc8c62464099d548b0d" exitCode=0
Nov 24 13:41:35 crc kubenswrapper[5039]: I1124 13:41:35.437823 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-58cdd4bdc9-hd6w5" event={"ID":"8bb936ec-11da-428d-93ed-33745690864a","Type":"ContainerDied","Data":"9b027226f77dc29632f20c9f4ae36818bb0eeb4466f11dc8c62464099d548b0d"}
Nov 24 13:41:35 crc kubenswrapper[5039]: I1124 13:41:35.439472 5039 generic.go:334] "Generic (PLEG): container finished" podID="7fe0bf4f-b6f8-48c0-b772-587a715e6c27" containerID="eb2f7267049802178c3dd0c7578422d8ddad0e06220f23d0572624afbd97cc53" exitCode=0
Nov 24 13:41:35 crc kubenswrapper[5039]: I1124 13:41:35.439521 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c4b94795b-c6c2f" event={"ID":"7fe0bf4f-b6f8-48c0-b772-587a715e6c27","Type":"ContainerDied","Data":"eb2f7267049802178c3dd0c7578422d8ddad0e06220f23d0572624afbd97cc53"}
Nov 24 13:41:37 crc kubenswrapper[5039]: I1124 13:41:37.974623 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-554bdcfd44-79lq6"
Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.100138 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-78dc996954-b5s9v"
Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.104261 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s6bjw\" (UniqueName: \"kubernetes.io/projected/fdafa1d4-1e80-420d-a2a1-4017bd9144be-kube-api-access-s6bjw\") pod \"fdafa1d4-1e80-420d-a2a1-4017bd9144be\" (UID: \"fdafa1d4-1e80-420d-a2a1-4017bd9144be\") "
Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.104337 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdafa1d4-1e80-420d-a2a1-4017bd9144be-combined-ca-bundle\") pod \"fdafa1d4-1e80-420d-a2a1-4017bd9144be\" (UID: \"fdafa1d4-1e80-420d-a2a1-4017bd9144be\") "
Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.104798 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fdafa1d4-1e80-420d-a2a1-4017bd9144be-config-data-custom\") pod \"fdafa1d4-1e80-420d-a2a1-4017bd9144be\" (UID: \"fdafa1d4-1e80-420d-a2a1-4017bd9144be\") "
Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.104987 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdafa1d4-1e80-420d-a2a1-4017bd9144be-config-data\") pod \"fdafa1d4-1e80-420d-a2a1-4017bd9144be\" (UID: \"fdafa1d4-1e80-420d-a2a1-4017bd9144be\") "
Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.123508 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fdafa1d4-1e80-420d-a2a1-4017bd9144be-kube-api-access-s6bjw" (OuterVolumeSpecName: "kube-api-access-s6bjw") pod "fdafa1d4-1e80-420d-a2a1-4017bd9144be" (UID: "fdafa1d4-1e80-420d-a2a1-4017bd9144be"). InnerVolumeSpecName "kube-api-access-s6bjw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.129561 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdafa1d4-1e80-420d-a2a1-4017bd9144be-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "fdafa1d4-1e80-420d-a2a1-4017bd9144be" (UID: "fdafa1d4-1e80-420d-a2a1-4017bd9144be"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.162429 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdafa1d4-1e80-420d-a2a1-4017bd9144be-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fdafa1d4-1e80-420d-a2a1-4017bd9144be" (UID: "fdafa1d4-1e80-420d-a2a1-4017bd9144be"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.207957 5039 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fdafa1d4-1e80-420d-a2a1-4017bd9144be-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.207987 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s6bjw\" (UniqueName: \"kubernetes.io/projected/fdafa1d4-1e80-420d-a2a1-4017bd9144be-kube-api-access-s6bjw\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.208000 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdafa1d4-1e80-420d-a2a1-4017bd9144be-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.220345 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdafa1d4-1e80-420d-a2a1-4017bd9144be-config-data" (OuterVolumeSpecName: "config-data") pod "fdafa1d4-1e80-420d-a2a1-4017bd9144be" (UID: "fdafa1d4-1e80-420d-a2a1-4017bd9144be"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.263213 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.315057 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-dns-swift-storage-0\") pod \"0840f945-89d7-40d1-b8dc-629d32793a6c\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.316225 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-ovsdbserver-nb\") pod \"0840f945-89d7-40d1-b8dc-629d32793a6c\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.316315 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nz25p\" (UniqueName: \"kubernetes.io/projected/0840f945-89d7-40d1-b8dc-629d32793a6c-kube-api-access-nz25p\") pod \"0840f945-89d7-40d1-b8dc-629d32793a6c\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.316433 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-ovsdbserver-sb\") pod \"0840f945-89d7-40d1-b8dc-629d32793a6c\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.316510 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-config\") pod \"0840f945-89d7-40d1-b8dc-629d32793a6c\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.316617 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-dns-svc\") pod 
\"0840f945-89d7-40d1-b8dc-629d32793a6c\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.321019 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdafa1d4-1e80-420d-a2a1-4017bd9144be-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.340482 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.392832 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0840f945-89d7-40d1-b8dc-629d32793a6c-kube-api-access-nz25p" (OuterVolumeSpecName: "kube-api-access-nz25p") pod "0840f945-89d7-40d1-b8dc-629d32793a6c" (UID: "0840f945-89d7-40d1-b8dc-629d32793a6c"). InnerVolumeSpecName "kube-api-access-nz25p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.424381 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-config-data\") pod \"bb35c10f-5f1f-4175-9174-4696bada484a\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.424646 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g27qb\" (UniqueName: \"kubernetes.io/projected/bb35c10f-5f1f-4175-9174-4696bada484a-kube-api-access-g27qb\") pod \"bb35c10f-5f1f-4175-9174-4696bada484a\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.424812 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb35c10f-5f1f-4175-9174-4696bada484a-log-httpd\") pod \"bb35c10f-5f1f-4175-9174-4696bada484a\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.424832 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-combined-ca-bundle\") pod \"bb35c10f-5f1f-4175-9174-4696bada484a\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.424870 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb35c10f-5f1f-4175-9174-4696bada484a-run-httpd\") pod \"bb35c10f-5f1f-4175-9174-4696bada484a\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.424897 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-sg-core-conf-yaml\") pod \"bb35c10f-5f1f-4175-9174-4696bada484a\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.424934 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-scripts\") pod \"bb35c10f-5f1f-4175-9174-4696bada484a\" (UID: \"bb35c10f-5f1f-4175-9174-4696bada484a\") " Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.425361 5039 reconciler_common.go:293] "Volume detached 
for volume \"kube-api-access-nz25p\" (UniqueName: \"kubernetes.io/projected/0840f945-89d7-40d1-b8dc-629d32793a6c-kube-api-access-nz25p\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.442945 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb35c10f-5f1f-4175-9174-4696bada484a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "bb35c10f-5f1f-4175-9174-4696bada484a" (UID: "bb35c10f-5f1f-4175-9174-4696bada484a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.454344 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-58cdd4bdc9-hd6w5" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.459880 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb35c10f-5f1f-4175-9174-4696bada484a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "bb35c10f-5f1f-4175-9174-4696bada484a" (UID: "bb35c10f-5f1f-4175-9174-4696bada484a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.532835 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q6xvm\" (UniqueName: \"kubernetes.io/projected/8bb936ec-11da-428d-93ed-33745690864a-kube-api-access-q6xvm\") pod \"8bb936ec-11da-428d-93ed-33745690864a\" (UID: \"8bb936ec-11da-428d-93ed-33745690864a\") " Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.532952 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bb936ec-11da-428d-93ed-33745690864a-combined-ca-bundle\") pod \"8bb936ec-11da-428d-93ed-33745690864a\" (UID: \"8bb936ec-11da-428d-93ed-33745690864a\") " Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.533205 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bb936ec-11da-428d-93ed-33745690864a-config-data\") pod \"8bb936ec-11da-428d-93ed-33745690864a\" (UID: \"8bb936ec-11da-428d-93ed-33745690864a\") " Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.533270 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8bb936ec-11da-428d-93ed-33745690864a-config-data-custom\") pod \"8bb936ec-11da-428d-93ed-33745690864a\" (UID: \"8bb936ec-11da-428d-93ed-33745690864a\") " Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.533816 5039 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb35c10f-5f1f-4175-9174-4696bada484a-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.533831 5039 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb35c10f-5f1f-4175-9174-4696bada484a-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.539343 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb35c10f-5f1f-4175-9174-4696bada484a-kube-api-access-g27qb" (OuterVolumeSpecName: "kube-api-access-g27qb") pod "bb35c10f-5f1f-4175-9174-4696bada484a" (UID: "bb35c10f-5f1f-4175-9174-4696bada484a"). 
InnerVolumeSpecName "kube-api-access-g27qb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.567432 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-scripts" (OuterVolumeSpecName: "scripts") pod "bb35c10f-5f1f-4175-9174-4696bada484a" (UID: "bb35c10f-5f1f-4175-9174-4696bada484a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.568227 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.580872 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-58cdd4bdc9-hd6w5" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.594685 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bb936ec-11da-428d-93ed-33745690864a-kube-api-access-q6xvm" (OuterVolumeSpecName: "kube-api-access-q6xvm") pod "8bb936ec-11da-428d-93ed-33745690864a" (UID: "8bb936ec-11da-428d-93ed-33745690864a"). InnerVolumeSpecName "kube-api-access-q6xvm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.617905 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.620159 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bb936ec-11da-428d-93ed-33745690864a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "8bb936ec-11da-428d-93ed-33745690864a" (UID: "8bb936ec-11da-428d-93ed-33745690864a"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.635838 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-554bdcfd44-79lq6" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.642617 5039 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8bb936ec-11da-428d-93ed-33745690864a-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.644405 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q6xvm\" (UniqueName: \"kubernetes.io/projected/8bb936ec-11da-428d-93ed-33745690864a-kube-api-access-q6xvm\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.644421 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.644433 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g27qb\" (UniqueName: \"kubernetes.io/projected/bb35c10f-5f1f-4175-9174-4696bada484a-kube-api-access-g27qb\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.656328 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.860136651 podStartE2EDuration="21.656303816s" podCreationTimestamp="2025-11-24 13:41:17 +0000 UTC" firstStartedPulling="2025-11-24 13:41:18.928693029 +0000 UTC m=+1391.367817529" lastFinishedPulling="2025-11-24 13:41:37.724860194 +0000 UTC m=+1410.163984694" observedRunningTime="2025-11-24 13:41:38.649254722 +0000 UTC m=+1411.088379232" watchObservedRunningTime="2025-11-24 13:41:38.656303816 +0000 UTC m=+1411.095428336" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.870683 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "bb35c10f-5f1f-4175-9174-4696bada484a" (UID: "bb35c10f-5f1f-4175-9174-4696bada484a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.902170 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0840f945-89d7-40d1-b8dc-629d32793a6c" (UID: "0840f945-89d7-40d1-b8dc-629d32793a6c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.907611 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bb936ec-11da-428d-93ed-33745690864a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8bb936ec-11da-428d-93ed-33745690864a" (UID: "8bb936ec-11da-428d-93ed-33745690864a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.911244 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0840f945-89d7-40d1-b8dc-629d32793a6c" (UID: "0840f945-89d7-40d1-b8dc-629d32793a6c"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.942430 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0840f945-89d7-40d1-b8dc-629d32793a6c" (UID: "0840f945-89d7-40d1-b8dc-629d32793a6c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.954170 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0840f945-89d7-40d1-b8dc-629d32793a6c" (UID: "0840f945-89d7-40d1-b8dc-629d32793a6c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.954466 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-dns-svc\") pod \"0840f945-89d7-40d1-b8dc-629d32793a6c\" (UID: \"0840f945-89d7-40d1-b8dc-629d32793a6c\") " Nov 24 13:41:38 crc kubenswrapper[5039]: W1124 13:41:38.954880 5039 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/0840f945-89d7-40d1-b8dc-629d32793a6c/volumes/kubernetes.io~configmap/dns-svc Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.954902 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0840f945-89d7-40d1-b8dc-629d32793a6c" (UID: "0840f945-89d7-40d1-b8dc-629d32793a6c"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.955820 5039 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.955848 5039 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.955863 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bb936ec-11da-428d-93ed-33745690864a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.955877 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.955890 5039 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.955903 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:38 crc kubenswrapper[5039]: I1124 13:41:38.961632 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-config" (OuterVolumeSpecName: "config") pod "0840f945-89d7-40d1-b8dc-629d32793a6c" (UID: "0840f945-89d7-40d1-b8dc-629d32793a6c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.019628 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-p8jd7" event={"ID":"0840f945-89d7-40d1-b8dc-629d32793a6c","Type":"ContainerDied","Data":"e9db6f74f933c7a73061adadbe8e9f0a39dfbd1b2111ae82b5c5f525146970eb"} Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.019691 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-58cdd4bdc9-hd6w5" event={"ID":"8bb936ec-11da-428d-93ed-33745690864a","Type":"ContainerDied","Data":"3a471328be1bf8d72c78e1fb1764afe1fc8c88c920cb3e2354d04cd92dc8d4bc"} Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.019710 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb35c10f-5f1f-4175-9174-4696bada484a","Type":"ContainerDied","Data":"383cd41ac44c200f38ed0cbe4d9b94ac1b1944385a3ebaa0c236b92672a41e0d"} Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.019724 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"bbd0fae4-aa15-46d2-8118-f738c3c1dc3c","Type":"ContainerStarted","Data":"ea98b4316cfee684b070833a76e3292a6c568f0c8ec7e470c30bb068339e3624"} Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.019737 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-554bdcfd44-79lq6" event={"ID":"fdafa1d4-1e80-420d-a2a1-4017bd9144be","Type":"ContainerDied","Data":"5d356064884c86b86ca0d54702d0914e708e78c337129e9c72e8f0bdd9c0fdea"} Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.020135 5039 scope.go:117] "RemoveContainer" containerID="5c51c7415cbccf2387a2f7b1fa7d06b5a86ecb4b13d32e014a92b97de1f692d2" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.021470 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7f45f46b76-fckwv"] Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.031284 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-bdq8h"] Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.055435 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bb936ec-11da-428d-93ed-33745690864a-config-data" (OuterVolumeSpecName: "config-data") pod "8bb936ec-11da-428d-93ed-33745690864a" (UID: "8bb936ec-11da-428d-93ed-33745690864a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.057286 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0840f945-89d7-40d1-b8dc-629d32793a6c-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.061648 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bb35c10f-5f1f-4175-9174-4696bada484a" (UID: "bb35c10f-5f1f-4175-9174-4696bada484a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.072494 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-2540-account-create-g8rgr"] Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.091683 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-554bdcfd44-79lq6"] Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.128780 5039 scope.go:117] "RemoveContainer" containerID="d3a6a7d78e5f185a361db46d2d8e366e428a7610066b106ac68d1fd802dbab76" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.129734 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-554bdcfd44-79lq6"] Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.162162 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.162214 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bb936ec-11da-428d-93ed-33745690864a-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.297675 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-config-data" (OuterVolumeSpecName: "config-data") pod "bb35c10f-5f1f-4175-9174-4696bada484a" (UID: "bb35c10f-5f1f-4175-9174-4696bada484a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.300675 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-p8jd7"] Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.372643 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb35c10f-5f1f-4175-9174-4696bada484a-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.382283 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-p8jd7"] Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.435674 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-5blmb"] Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.505590 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-58cdd4bdc9-hd6w5"] Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.520014 5039 scope.go:117] "RemoveContainer" containerID="9b027226f77dc29632f20c9f4ae36818bb0eeb4466f11dc8c62464099d548b0d" Nov 24 13:41:39 crc kubenswrapper[5039]: W1124 13:41:39.563679 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd505257c_0bc2_427b_8f9a_e5333460f461.slice/crio-f3d86bd90a0ba7915d5ab7d03f0b5471c33a79b941f3525024485ce5e0107975 WatchSource:0}: Error finding container f3d86bd90a0ba7915d5ab7d03f0b5471c33a79b941f3525024485ce5e0107975: Status 404 returned error can't find the container with id f3d86bd90a0ba7915d5ab7d03f0b5471c33a79b941f3525024485ce5e0107975 Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.608807 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-58cdd4bdc9-hd6w5"] Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.659870 5039 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-ba86-account-create-zsw6z"] Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.687229 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-78c587fb4d-fl6qr"] Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.693729 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ba86-account-create-zsw6z" event={"ID":"a18b1393-b27b-42f7-938d-cf3321f376d6","Type":"ContainerStarted","Data":"9300f7405833a20aba0b8d3f189fc0f23dcd6ca4676a1d6d2016c3f13195cf6b"} Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.711260 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-69e6-account-create-5kzxp"] Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.711308 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-5blmb" event={"ID":"11fd95de-91de-400f-a931-ca7339de0a76","Type":"ContainerStarted","Data":"e057b720f52694881cab5be2c34eae1b8b2581202b99746fa0405680fd3f9e74"} Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.719233 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-bdq8h" event={"ID":"a73195a6-8449-41fa-ad7c-5ce086c264ec","Type":"ContainerStarted","Data":"f74cca513ac60034931a8210d0f24d4e718563299cf10cec60b14f3162038f92"} Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.726231 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7f45f46b76-fckwv" event={"ID":"88f5edac-dd13-4a09-97a0-60f263e60f23","Type":"ContainerStarted","Data":"36b5746515e23e1fb92ebea5cdd7aab51f484e14e7ef0b4df1bcd43acff19c90"} Nov 24 13:41:39 crc kubenswrapper[5039]: W1124 13:41:39.727594 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6721127c_79a1_4fd5_98db_0e99ff78de0e.slice/crio-bc3d994234b1e0efda072dd77c5f10129d3f00dfb7495fea5a8c8d1f6fad8f20 WatchSource:0}: Error finding container bc3d994234b1e0efda072dd77c5f10129d3f00dfb7495fea5a8c8d1f6fad8f20: Status 404 returned error can't find the container with id bc3d994234b1e0efda072dd77c5f10129d3f00dfb7495fea5a8c8d1f6fad8f20 Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.740731 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2540-account-create-g8rgr" event={"ID":"4b1111a8-04ac-478b-b1bf-557246566f05","Type":"ContainerStarted","Data":"1b789aac647b6c3d503dc10cc2688731ee2f03ce6c0b78d01616855d4fda0f17"} Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.747047 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-78c587fb4d-fl6qr" event={"ID":"d505257c-0bc2-427b-8f9a-e5333460f461","Type":"ContainerStarted","Data":"f3d86bd90a0ba7915d5ab7d03f0b5471c33a79b941f3525024485ce5e0107975"} Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.767244 5039 generic.go:334] "Generic (PLEG): container finished" podID="09227c0c-ba64-4216-8bac-a8c0f88706c3" containerID="b9305e61a2db1af7e2f870371d4363db7cdebd32f06e05c0c2227539dbfa2707" exitCode=1 Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.768265 5039 scope.go:117] "RemoveContainer" containerID="b9305e61a2db1af7e2f870371d4363db7cdebd32f06e05c0c2227539dbfa2707" Nov 24 13:41:39 crc kubenswrapper[5039]: E1124 13:41:39.768437 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api 
pod=heat-api-86b5f44b95-4qtzc_openstack(09227c0c-ba64-4216-8bac-a8c0f88706c3)\"" pod="openstack/heat-api-86b5f44b95-4qtzc" podUID="09227c0c-ba64-4216-8bac-a8c0f88706c3" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.768466 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-86b5f44b95-4qtzc" event={"ID":"09227c0c-ba64-4216-8bac-a8c0f88706c3","Type":"ContainerDied","Data":"b9305e61a2db1af7e2f870371d4363db7cdebd32f06e05c0c2227539dbfa2707"} Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.768543 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-7zj9p"] Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.805016 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-5b66587b55-thzjl"] Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.811884 5039 scope.go:117] "RemoveContainer" containerID="bfc41be41545f659adb793d89f7a38a9701c37ae9cf9302837c485713e46778b" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.817831 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.829598 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.841645 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:41:39 crc kubenswrapper[5039]: E1124 13:41:39.842143 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb35c10f-5f1f-4175-9174-4696bada484a" containerName="ceilometer-central-agent" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.842177 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb35c10f-5f1f-4175-9174-4696bada484a" containerName="ceilometer-central-agent" Nov 24 13:41:39 crc kubenswrapper[5039]: E1124 13:41:39.842195 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0840f945-89d7-40d1-b8dc-629d32793a6c" containerName="init" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.842204 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0840f945-89d7-40d1-b8dc-629d32793a6c" containerName="init" Nov 24 13:41:39 crc kubenswrapper[5039]: E1124 13:41:39.842219 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdafa1d4-1e80-420d-a2a1-4017bd9144be" containerName="heat-cfnapi" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.842225 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdafa1d4-1e80-420d-a2a1-4017bd9144be" containerName="heat-cfnapi" Nov 24 13:41:39 crc kubenswrapper[5039]: E1124 13:41:39.842237 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb35c10f-5f1f-4175-9174-4696bada484a" containerName="sg-core" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.842243 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb35c10f-5f1f-4175-9174-4696bada484a" containerName="sg-core" Nov 24 13:41:39 crc kubenswrapper[5039]: E1124 13:41:39.842251 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0840f945-89d7-40d1-b8dc-629d32793a6c" containerName="dnsmasq-dns" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.842257 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0840f945-89d7-40d1-b8dc-629d32793a6c" containerName="dnsmasq-dns" Nov 24 13:41:39 crc kubenswrapper[5039]: E1124 13:41:39.842266 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb35c10f-5f1f-4175-9174-4696bada484a" 
containerName="ceilometer-notification-agent" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.842273 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb35c10f-5f1f-4175-9174-4696bada484a" containerName="ceilometer-notification-agent" Nov 24 13:41:39 crc kubenswrapper[5039]: E1124 13:41:39.842289 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb35c10f-5f1f-4175-9174-4696bada484a" containerName="proxy-httpd" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.842295 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb35c10f-5f1f-4175-9174-4696bada484a" containerName="proxy-httpd" Nov 24 13:41:39 crc kubenswrapper[5039]: E1124 13:41:39.842317 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bb936ec-11da-428d-93ed-33745690864a" containerName="heat-api" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.842324 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bb936ec-11da-428d-93ed-33745690864a" containerName="heat-api" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.842519 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb35c10f-5f1f-4175-9174-4696bada484a" containerName="ceilometer-notification-agent" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.842551 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb35c10f-5f1f-4175-9174-4696bada484a" containerName="ceilometer-central-agent" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.842564 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb35c10f-5f1f-4175-9174-4696bada484a" containerName="sg-core" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.842571 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="fdafa1d4-1e80-420d-a2a1-4017bd9144be" containerName="heat-cfnapi" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.842584 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0840f945-89d7-40d1-b8dc-629d32793a6c" containerName="dnsmasq-dns" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.842593 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bb936ec-11da-428d-93ed-33745690864a" containerName="heat-api" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.842602 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb35c10f-5f1f-4175-9174-4696bada484a" containerName="proxy-httpd" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.864899 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.865042 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.868411 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.878952 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.902122 5039 scope.go:117] "RemoveContainer" containerID="034a273616dc694275fdcce5cfb64e6ed269c5c01da997861bc702be0bbaf3ad" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.946728 5039 scope.go:117] "RemoveContainer" containerID="cad996b1bb6a4808da9181a9f0062555a1541578283f9207c4b33b5add39ff39" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.993156 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-log-httpd\") pod \"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.993255 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-run-httpd\") pod \"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.993287 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.993375 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-scripts\") pod \"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.993409 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbmbb\" (UniqueName: \"kubernetes.io/projected/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-kube-api-access-fbmbb\") pod \"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.993425 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-config-data\") pod \"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:39 crc kubenswrapper[5039]: I1124 13:41:39.993454 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.045621 5039 scope.go:117] "RemoveContainer" containerID="7f876b92196080d7a8a1b72cb590a7aa6142c21222f40983a1db923eeee0aec2" Nov 24 13:41:40 crc 
kubenswrapper[5039]: I1124 13:41:40.095672 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-scripts\") pod \"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.095734 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbmbb\" (UniqueName: \"kubernetes.io/projected/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-kube-api-access-fbmbb\") pod \"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.095755 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-config-data\") pod \"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.095784 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.095866 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-log-httpd\") pod \"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.095948 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-run-httpd\") pod \"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.095976 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.097365 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-log-httpd\") pod \"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.098320 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-run-httpd\") pod \"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.113872 5039 scope.go:117] "RemoveContainer" containerID="fb38b8bf941b98e0d62d0bb1454a7e7f94676c78c2fe9268917699ed00e067d1" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.114231 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-scripts\") pod 
\"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.114774 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.115480 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.119604 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-config-data\") pod \"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.125610 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbmbb\" (UniqueName: \"kubernetes.io/projected/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-kube-api-access-fbmbb\") pod \"ceilometer-0\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") " pod="openstack/ceilometer-0" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.179728 5039 scope.go:117] "RemoveContainer" containerID="d0356b6d2a8303b39436390c1cbbd099ab52659f247b728dbc7a2b8928e5f042" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.194700 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.338554 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0840f945-89d7-40d1-b8dc-629d32793a6c" path="/var/lib/kubelet/pods/0840f945-89d7-40d1-b8dc-629d32793a6c/volumes" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.339151 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8bb936ec-11da-428d-93ed-33745690864a" path="/var/lib/kubelet/pods/8bb936ec-11da-428d-93ed-33745690864a/volumes" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.339745 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb35c10f-5f1f-4175-9174-4696bada484a" path="/var/lib/kubelet/pods/bb35c10f-5f1f-4175-9174-4696bada484a/volumes" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.343233 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fdafa1d4-1e80-420d-a2a1-4017bd9144be" path="/var/lib/kubelet/pods/fdafa1d4-1e80-420d-a2a1-4017bd9144be/volumes" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.799350 5039 generic.go:334] "Generic (PLEG): container finished" podID="6721127c-79a1-4fd5-98db-0e99ff78de0e" containerID="11f507bd49d43b06d4559e076aa96bed1718d9906ec6ca45d15e51ce44d771f4" exitCode=0 Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.799458 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-69e6-account-create-5kzxp" event={"ID":"6721127c-79a1-4fd5-98db-0e99ff78de0e","Type":"ContainerDied","Data":"11f507bd49d43b06d4559e076aa96bed1718d9906ec6ca45d15e51ce44d771f4"} Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.799814 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-69e6-account-create-5kzxp" event={"ID":"6721127c-79a1-4fd5-98db-0e99ff78de0e","Type":"ContainerStarted","Data":"bc3d994234b1e0efda072dd77c5f10129d3f00dfb7495fea5a8c8d1f6fad8f20"} Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.802287 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-bdq8h" event={"ID":"a73195a6-8449-41fa-ad7c-5ce086c264ec","Type":"ContainerDied","Data":"7c6c7338818ea35148780b97081efddcc9fc41f98cab5a074b848dee9ae97e8b"} Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.802214 5039 generic.go:334] "Generic (PLEG): container finished" podID="a73195a6-8449-41fa-ad7c-5ce086c264ec" containerID="7c6c7338818ea35148780b97081efddcc9fc41f98cab5a074b848dee9ae97e8b" exitCode=0 Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.855820 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-78c587fb4d-fl6qr" event={"ID":"d505257c-0bc2-427b-8f9a-e5333460f461","Type":"ContainerStarted","Data":"424a59cbb122a5e7c7f6841bd3dd37f12af72b94bf4b781403646bc495cdd742"} Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.857666 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-78c587fb4d-fl6qr" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.860198 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5b66587b55-thzjl" event={"ID":"bd1bf6a5-309b-4960-8f37-34b006db3599","Type":"ContainerStarted","Data":"63b8f6f8a45285129ee6263d3ef6f961c40edb7b6b061e18e59ddc43bea13e2f"} Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.860223 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5b66587b55-thzjl" 
event={"ID":"bd1bf6a5-309b-4960-8f37-34b006db3599","Type":"ContainerStarted","Data":"f2394d70c687d3aa320a76cadcd91ec3b1e6104dc0daa0030f901fb5ab5af30a"} Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.863263 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.903707 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7f45f46b76-fckwv" event={"ID":"88f5edac-dd13-4a09-97a0-60f263e60f23","Type":"ContainerStarted","Data":"68de4bb0f4ad138e9ed720c1fa24cc278b351b0acb32b25e87bc7dec30cfd042"} Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.903842 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-7f45f46b76-fckwv" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.904288 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-78c587fb4d-fl6qr" podStartSLOduration=11.904266538 podStartE2EDuration="11.904266538s" podCreationTimestamp="2025-11-24 13:41:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:41:40.901847389 +0000 UTC m=+1413.340971889" watchObservedRunningTime="2025-11-24 13:41:40.904266538 +0000 UTC m=+1413.343391038" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.922562 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-7f45f46b76-fckwv" podStartSLOduration=11.922541478 podStartE2EDuration="11.922541478s" podCreationTimestamp="2025-11-24 13:41:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:41:40.918799637 +0000 UTC m=+1413.357924137" watchObservedRunningTime="2025-11-24 13:41:40.922541478 +0000 UTC m=+1413.361665968" Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.924749 5039 generic.go:334] "Generic (PLEG): container finished" podID="a41f29ac-696c-4aa9-aed7-b8959e15fa52" containerID="7f7eb8a637904516979cfa166420529ce38a51580cbe6d0f59544fbc11742b15" exitCode=0 Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.925027 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-7zj9p" event={"ID":"a41f29ac-696c-4aa9-aed7-b8959e15fa52","Type":"ContainerDied","Data":"7f7eb8a637904516979cfa166420529ce38a51580cbe6d0f59544fbc11742b15"} Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.925059 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-7zj9p" event={"ID":"a41f29ac-696c-4aa9-aed7-b8959e15fa52","Type":"ContainerStarted","Data":"5ebfdf91141d6c63139197603b7bc26896c924c7192a618dc228e6410c4823d2"} Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.937876 5039 generic.go:334] "Generic (PLEG): container finished" podID="7fe0bf4f-b6f8-48c0-b772-587a715e6c27" containerID="8437e26cb6893af74d085f476e072984c99df391d9ee19a4d901cea208fdde73" exitCode=0 Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.938011 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c4b94795b-c6c2f" event={"ID":"7fe0bf4f-b6f8-48c0-b772-587a715e6c27","Type":"ContainerDied","Data":"8437e26cb6893af74d085f476e072984c99df391d9ee19a4d901cea208fdde73"} Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.960569 5039 scope.go:117] "RemoveContainer" containerID="b9305e61a2db1af7e2f870371d4363db7cdebd32f06e05c0c2227539dbfa2707" 
Nov 24 13:41:40 crc kubenswrapper[5039]: E1124 13:41:40.960780 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-86b5f44b95-4qtzc_openstack(09227c0c-ba64-4216-8bac-a8c0f88706c3)\"" pod="openstack/heat-api-86b5f44b95-4qtzc" podUID="09227c0c-ba64-4216-8bac-a8c0f88706c3"
Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.963381 5039 generic.go:334] "Generic (PLEG): container finished" podID="11fd95de-91de-400f-a931-ca7339de0a76" containerID="d7d421dc29cae07402f5080b927654682301e6b252a7c213462072ff461081f2" exitCode=0
Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.963455 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-5blmb" event={"ID":"11fd95de-91de-400f-a931-ca7339de0a76","Type":"ContainerDied","Data":"d7d421dc29cae07402f5080b927654682301e6b252a7c213462072ff461081f2"}
Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.970088 5039 generic.go:334] "Generic (PLEG): container finished" podID="4b1111a8-04ac-478b-b1bf-557246566f05" containerID="647c2f85bd87a6d71e684d9b323535c07159bca744eaa84f2704934aace84ee0" exitCode=0
Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.970138 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2540-account-create-g8rgr" event={"ID":"4b1111a8-04ac-478b-b1bf-557246566f05","Type":"ContainerDied","Data":"647c2f85bd87a6d71e684d9b323535c07159bca744eaa84f2704934aace84ee0"}
Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.980797 5039 generic.go:334] "Generic (PLEG): container finished" podID="a18b1393-b27b-42f7-938d-cf3321f376d6" containerID="77dc73bdbef96dc90c16a82371a584af473439a7be45247415e9a0b9c5dc44cc" exitCode=0
Nov 24 13:41:40 crc kubenswrapper[5039]: I1124 13:41:40.981045 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ba86-account-create-zsw6z" event={"ID":"a18b1393-b27b-42f7-938d-cf3321f376d6","Type":"ContainerDied","Data":"77dc73bdbef96dc90c16a82371a584af473439a7be45247415e9a0b9c5dc44cc"}
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.285365 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-c4b94795b-c6c2f"
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.436481 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5lwdt\" (UniqueName: \"kubernetes.io/projected/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-kube-api-access-5lwdt\") pod \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\" (UID: \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\") "
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.437039 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-ovndb-tls-certs\") pod \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\" (UID: \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\") "
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.437069 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-combined-ca-bundle\") pod \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\" (UID: \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\") "
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.437184 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-config\") pod \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\" (UID: \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\") "
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.437278 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-httpd-config\") pod \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\" (UID: \"7fe0bf4f-b6f8-48c0-b772-587a715e6c27\") "
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.441964 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-kube-api-access-5lwdt" (OuterVolumeSpecName: "kube-api-access-5lwdt") pod "7fe0bf4f-b6f8-48c0-b772-587a715e6c27" (UID: "7fe0bf4f-b6f8-48c0-b772-587a715e6c27"). InnerVolumeSpecName "kube-api-access-5lwdt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.442189 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "7fe0bf4f-b6f8-48c0-b772-587a715e6c27" (UID: "7fe0bf4f-b6f8-48c0-b772-587a715e6c27"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.505453 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7fe0bf4f-b6f8-48c0-b772-587a715e6c27" (UID: "7fe0bf4f-b6f8-48c0-b772-587a715e6c27"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.526844 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-config" (OuterVolumeSpecName: "config") pod "7fe0bf4f-b6f8-48c0-b772-587a715e6c27" (UID: "7fe0bf4f-b6f8-48c0-b772-587a715e6c27"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.533682 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-86b5f44b95-4qtzc"
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.533788 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-86b5f44b95-4qtzc"
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.543494 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5lwdt\" (UniqueName: \"kubernetes.io/projected/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-kube-api-access-5lwdt\") on node \"crc\" DevicePath \"\""
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.543546 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.543559 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-config\") on node \"crc\" DevicePath \"\""
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.543568 5039 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-httpd-config\") on node \"crc\" DevicePath \"\""
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.555967 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "7fe0bf4f-b6f8-48c0-b772-587a715e6c27" (UID: "7fe0bf4f-b6f8-48c0-b772-587a715e6c27"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.645085 5039 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fe0bf4f-b6f8-48c0-b772-587a715e6c27-ovndb-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.994300 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5b66587b55-thzjl" event={"ID":"bd1bf6a5-309b-4960-8f37-34b006db3599","Type":"ContainerStarted","Data":"fb41f32753b829fbcf6401ae4729cf37c6ef576f684e1a5b94e30ed299f81119"}
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.997771 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c4b94795b-c6c2f" event={"ID":"7fe0bf4f-b6f8-48c0-b772-587a715e6c27","Type":"ContainerDied","Data":"4c909e5cf8f62a06eda948c3adf848a4d64e5c832b8c030a09771e6b065e41dc"}
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.997825 5039 scope.go:117] "RemoveContainer" containerID="eb2f7267049802178c3dd0c7578422d8ddad0e06220f23d0572624afbd97cc53"
Nov 24 13:41:41 crc kubenswrapper[5039]: I1124 13:41:41.997786 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-c4b94795b-c6c2f"
Nov 24 13:41:42 crc kubenswrapper[5039]: I1124 13:41:42.003995 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347","Type":"ContainerStarted","Data":"7c95d3e1805f1234919c73ae83fca3a9f85403a43cb07e971fbc31116ab7dae9"}
Nov 24 13:41:42 crc kubenswrapper[5039]: I1124 13:41:42.004049 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347","Type":"ContainerStarted","Data":"6a5fcf5d33bb9f71c923f847f121a8560c7103f360f3c00560156e0f457c34fd"}
Nov 24 13:41:42 crc kubenswrapper[5039]: I1124 13:41:42.005017 5039 scope.go:117] "RemoveContainer" containerID="b9305e61a2db1af7e2f870371d4363db7cdebd32f06e05c0c2227539dbfa2707"
Nov 24 13:41:42 crc kubenswrapper[5039]: E1124 13:41:42.005192 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-86b5f44b95-4qtzc_openstack(09227c0c-ba64-4216-8bac-a8c0f88706c3)\"" pod="openstack/heat-api-86b5f44b95-4qtzc" podUID="09227c0c-ba64-4216-8bac-a8c0f88706c3"
Nov 24 13:41:42 crc kubenswrapper[5039]: I1124 13:41:42.034533 5039 scope.go:117] "RemoveContainer" containerID="8437e26cb6893af74d085f476e072984c99df391d9ee19a4d901cea208fdde73"
Nov 24 13:41:42 crc kubenswrapper[5039]: I1124 13:41:42.035729 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-5b66587b55-thzjl" podStartSLOduration=12.035711823 podStartE2EDuration="12.035711823s" podCreationTimestamp="2025-11-24 13:41:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:41:42.028969317 +0000 UTC m=+1414.468093827" watchObservedRunningTime="2025-11-24 13:41:42.035711823 +0000 UTC m=+1414.474836313"
Nov 24 13:41:42 crc kubenswrapper[5039]: I1124 13:41:42.055602 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-c4b94795b-c6c2f"]
Nov 24 13:41:42 crc kubenswrapper[5039]: I1124 13:41:42.068322 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-c4b94795b-c6c2f"]
Nov 24 13:41:42 crc kubenswrapper[5039]: E1124 13:41:42.161308 5039 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7fe0bf4f_b6f8_48c0_b772_587a715e6c27.slice/crio-4c909e5cf8f62a06eda948c3adf848a4d64e5c832b8c030a09771e6b065e41dc\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7fe0bf4f_b6f8_48c0_b772_587a715e6c27.slice\": RecentStats: unable to find data in memory cache]"
Nov 24 13:41:42 crc kubenswrapper[5039]: I1124 13:41:42.346439 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7fe0bf4f-b6f8-48c0-b772-587a715e6c27" path="/var/lib/kubelet/pods/7fe0bf4f-b6f8-48c0-b772-587a715e6c27/volumes"
Nov 24 13:41:42 crc kubenswrapper[5039]: I1124 13:41:42.821092 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-69e6-account-create-5kzxp" Nov 24 13:41:42 crc kubenswrapper[5039]: I1124 13:41:42.947224 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mz56g\" (UniqueName: \"kubernetes.io/projected/6721127c-79a1-4fd5-98db-0e99ff78de0e-kube-api-access-mz56g\") pod \"6721127c-79a1-4fd5-98db-0e99ff78de0e\" (UID: \"6721127c-79a1-4fd5-98db-0e99ff78de0e\") " Nov 24 13:41:42 crc kubenswrapper[5039]: I1124 13:41:42.947484 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6721127c-79a1-4fd5-98db-0e99ff78de0e-operator-scripts\") pod \"6721127c-79a1-4fd5-98db-0e99ff78de0e\" (UID: \"6721127c-79a1-4fd5-98db-0e99ff78de0e\") " Nov 24 13:41:42 crc kubenswrapper[5039]: I1124 13:41:42.955909 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6721127c-79a1-4fd5-98db-0e99ff78de0e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6721127c-79a1-4fd5-98db-0e99ff78de0e" (UID: "6721127c-79a1-4fd5-98db-0e99ff78de0e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:41:42 crc kubenswrapper[5039]: I1124 13:41:42.981169 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-bdq8h" Nov 24 13:41:42 crc kubenswrapper[5039]: I1124 13:41:42.993797 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6721127c-79a1-4fd5-98db-0e99ff78de0e-kube-api-access-mz56g" (OuterVolumeSpecName: "kube-api-access-mz56g") pod "6721127c-79a1-4fd5-98db-0e99ff78de0e" (UID: "6721127c-79a1-4fd5-98db-0e99ff78de0e"). InnerVolumeSpecName "kube-api-access-mz56g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.050791 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mz56g\" (UniqueName: \"kubernetes.io/projected/6721127c-79a1-4fd5-98db-0e99ff78de0e-kube-api-access-mz56g\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.051142 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6721127c-79a1-4fd5-98db-0e99ff78de0e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.113824 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-5blmb" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.126304 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347","Type":"ContainerStarted","Data":"4fce2b5786bdf3d14448c9e215699a029e0ab5a89b68610ee853a47a0a5197b7"} Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.147030 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-7zj9p" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.163729 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a41f29ac-696c-4aa9-aed7-b8959e15fa52-operator-scripts\") pod \"a41f29ac-696c-4aa9-aed7-b8959e15fa52\" (UID: \"a41f29ac-696c-4aa9-aed7-b8959e15fa52\") " Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.163772 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11fd95de-91de-400f-a931-ca7339de0a76-operator-scripts\") pod \"11fd95de-91de-400f-a931-ca7339de0a76\" (UID: \"11fd95de-91de-400f-a931-ca7339de0a76\") " Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.163860 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nj44z\" (UniqueName: \"kubernetes.io/projected/11fd95de-91de-400f-a931-ca7339de0a76-kube-api-access-nj44z\") pod \"11fd95de-91de-400f-a931-ca7339de0a76\" (UID: \"11fd95de-91de-400f-a931-ca7339de0a76\") " Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.163888 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a73195a6-8449-41fa-ad7c-5ce086c264ec-operator-scripts\") pod \"a73195a6-8449-41fa-ad7c-5ce086c264ec\" (UID: \"a73195a6-8449-41fa-ad7c-5ce086c264ec\") " Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.163958 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wpvrw\" (UniqueName: \"kubernetes.io/projected/a41f29ac-696c-4aa9-aed7-b8959e15fa52-kube-api-access-wpvrw\") pod \"a41f29ac-696c-4aa9-aed7-b8959e15fa52\" (UID: \"a41f29ac-696c-4aa9-aed7-b8959e15fa52\") " Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.163982 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mx2vv\" (UniqueName: \"kubernetes.io/projected/a73195a6-8449-41fa-ad7c-5ce086c264ec-kube-api-access-mx2vv\") pod \"a73195a6-8449-41fa-ad7c-5ce086c264ec\" (UID: \"a73195a6-8449-41fa-ad7c-5ce086c264ec\") " Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.170022 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a73195a6-8449-41fa-ad7c-5ce086c264ec-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a73195a6-8449-41fa-ad7c-5ce086c264ec" (UID: "a73195a6-8449-41fa-ad7c-5ce086c264ec"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.172808 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a73195a6-8449-41fa-ad7c-5ce086c264ec-kube-api-access-mx2vv" (OuterVolumeSpecName: "kube-api-access-mx2vv") pod "a73195a6-8449-41fa-ad7c-5ce086c264ec" (UID: "a73195a6-8449-41fa-ad7c-5ce086c264ec"). InnerVolumeSpecName "kube-api-access-mx2vv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.173485 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a41f29ac-696c-4aa9-aed7-b8959e15fa52-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a41f29ac-696c-4aa9-aed7-b8959e15fa52" (UID: "a41f29ac-696c-4aa9-aed7-b8959e15fa52"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.173722 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11fd95de-91de-400f-a931-ca7339de0a76-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "11fd95de-91de-400f-a931-ca7339de0a76" (UID: "11fd95de-91de-400f-a931-ca7339de0a76"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.174775 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2540-account-create-g8rgr" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.174981 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-69e6-account-create-5kzxp" event={"ID":"6721127c-79a1-4fd5-98db-0e99ff78de0e","Type":"ContainerDied","Data":"bc3d994234b1e0efda072dd77c5f10129d3f00dfb7495fea5a8c8d1f6fad8f20"} Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.175011 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bc3d994234b1e0efda072dd77c5f10129d3f00dfb7495fea5a8c8d1f6fad8f20" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.175042 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-69e6-account-create-5kzxp" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.179199 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a41f29ac-696c-4aa9-aed7-b8959e15fa52-kube-api-access-wpvrw" (OuterVolumeSpecName: "kube-api-access-wpvrw") pod "a41f29ac-696c-4aa9-aed7-b8959e15fa52" (UID: "a41f29ac-696c-4aa9-aed7-b8959e15fa52"). InnerVolumeSpecName "kube-api-access-wpvrw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.179283 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11fd95de-91de-400f-a931-ca7339de0a76-kube-api-access-nj44z" (OuterVolumeSpecName: "kube-api-access-nj44z") pod "11fd95de-91de-400f-a931-ca7339de0a76" (UID: "11fd95de-91de-400f-a931-ca7339de0a76"). InnerVolumeSpecName "kube-api-access-nj44z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.201442 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-bdq8h" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.201575 5039 scope.go:117] "RemoveContainer" containerID="b9305e61a2db1af7e2f870371d4363db7cdebd32f06e05c0c2227539dbfa2707" Nov 24 13:41:43 crc kubenswrapper[5039]: E1124 13:41:43.201955 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-86b5f44b95-4qtzc_openstack(09227c0c-ba64-4216-8bac-a8c0f88706c3)\"" pod="openstack/heat-api-86b5f44b95-4qtzc" podUID="09227c0c-ba64-4216-8bac-a8c0f88706c3" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.201997 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-bdq8h" event={"ID":"a73195a6-8449-41fa-ad7c-5ce086c264ec","Type":"ContainerDied","Data":"f74cca513ac60034931a8210d0f24d4e718563299cf10cec60b14f3162038f92"} Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.202016 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f74cca513ac60034931a8210d0f24d4e718563299cf10cec60b14f3162038f92" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.202512 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-5b66587b55-thzjl" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.203268 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-5b66587b55-thzjl" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.203496 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ba86-account-create-zsw6z" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.266041 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dzfvj\" (UniqueName: \"kubernetes.io/projected/4b1111a8-04ac-478b-b1bf-557246566f05-kube-api-access-dzfvj\") pod \"4b1111a8-04ac-478b-b1bf-557246566f05\" (UID: \"4b1111a8-04ac-478b-b1bf-557246566f05\") " Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.266107 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b1111a8-04ac-478b-b1bf-557246566f05-operator-scripts\") pod \"4b1111a8-04ac-478b-b1bf-557246566f05\" (UID: \"4b1111a8-04ac-478b-b1bf-557246566f05\") " Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.266164 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a18b1393-b27b-42f7-938d-cf3321f376d6-operator-scripts\") pod \"a18b1393-b27b-42f7-938d-cf3321f376d6\" (UID: \"a18b1393-b27b-42f7-938d-cf3321f376d6\") " Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.266246 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j522v\" (UniqueName: \"kubernetes.io/projected/a18b1393-b27b-42f7-938d-cf3321f376d6-kube-api-access-j522v\") pod \"a18b1393-b27b-42f7-938d-cf3321f376d6\" (UID: \"a18b1393-b27b-42f7-938d-cf3321f376d6\") " Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.268088 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b1111a8-04ac-478b-b1bf-557246566f05-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4b1111a8-04ac-478b-b1bf-557246566f05" (UID: "4b1111a8-04ac-478b-b1bf-557246566f05"). 
InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.269050 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a18b1393-b27b-42f7-938d-cf3321f376d6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a18b1393-b27b-42f7-938d-cf3321f376d6" (UID: "a18b1393-b27b-42f7-938d-cf3321f376d6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.273553 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b1111a8-04ac-478b-b1bf-557246566f05-kube-api-access-dzfvj" (OuterVolumeSpecName: "kube-api-access-dzfvj") pod "4b1111a8-04ac-478b-b1bf-557246566f05" (UID: "4b1111a8-04ac-478b-b1bf-557246566f05"). InnerVolumeSpecName "kube-api-access-dzfvj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.273678 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a18b1393-b27b-42f7-938d-cf3321f376d6-kube-api-access-j522v" (OuterVolumeSpecName: "kube-api-access-j522v") pod "a18b1393-b27b-42f7-938d-cf3321f376d6" (UID: "a18b1393-b27b-42f7-938d-cf3321f376d6"). InnerVolumeSpecName "kube-api-access-j522v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.274658 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wpvrw\" (UniqueName: \"kubernetes.io/projected/a41f29ac-696c-4aa9-aed7-b8959e15fa52-kube-api-access-wpvrw\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.276755 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mx2vv\" (UniqueName: \"kubernetes.io/projected/a73195a6-8449-41fa-ad7c-5ce086c264ec-kube-api-access-mx2vv\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.276808 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a41f29ac-696c-4aa9-aed7-b8959e15fa52-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.276823 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11fd95de-91de-400f-a931-ca7339de0a76-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.276835 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dzfvj\" (UniqueName: \"kubernetes.io/projected/4b1111a8-04ac-478b-b1bf-557246566f05-kube-api-access-dzfvj\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.276849 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b1111a8-04ac-478b-b1bf-557246566f05-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.276858 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a18b1393-b27b-42f7-938d-cf3321f376d6-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.276870 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nj44z\" 
(UniqueName: \"kubernetes.io/projected/11fd95de-91de-400f-a931-ca7339de0a76-kube-api-access-nj44z\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.276880 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a73195a6-8449-41fa-ad7c-5ce086c264ec-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.276888 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j522v\" (UniqueName: \"kubernetes.io/projected/a18b1393-b27b-42f7-938d-cf3321f376d6-kube-api-access-j522v\") on node \"crc\" DevicePath \"\"" Nov 24 13:41:43 crc kubenswrapper[5039]: I1124 13:41:43.492873 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.213706 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347","Type":"ContainerStarted","Data":"f7b51fcf9a4ac54c159cc943b9ba66e512df214012f946a413a6ba4ee2c56827"} Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.215606 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ba86-account-create-zsw6z" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.215626 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ba86-account-create-zsw6z" event={"ID":"a18b1393-b27b-42f7-938d-cf3321f376d6","Type":"ContainerDied","Data":"9300f7405833a20aba0b8d3f189fc0f23dcd6ca4676a1d6d2016c3f13195cf6b"} Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.216071 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9300f7405833a20aba0b8d3f189fc0f23dcd6ca4676a1d6d2016c3f13195cf6b" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.217479 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-5blmb" event={"ID":"11fd95de-91de-400f-a931-ca7339de0a76","Type":"ContainerDied","Data":"e057b720f52694881cab5be2c34eae1b8b2581202b99746fa0405680fd3f9e74"} Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.217528 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e057b720f52694881cab5be2c34eae1b8b2581202b99746fa0405680fd3f9e74" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.217546 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-5blmb" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.219306 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2540-account-create-g8rgr" event={"ID":"4b1111a8-04ac-478b-b1bf-557246566f05","Type":"ContainerDied","Data":"1b789aac647b6c3d503dc10cc2688731ee2f03ce6c0b78d01616855d4fda0f17"} Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.219346 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1b789aac647b6c3d503dc10cc2688731ee2f03ce6c0b78d01616855d4fda0f17" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.219324 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2540-account-create-g8rgr" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.221907 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-7zj9p" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.223652 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-7zj9p" event={"ID":"a41f29ac-696c-4aa9-aed7-b8959e15fa52","Type":"ContainerDied","Data":"5ebfdf91141d6c63139197603b7bc26896c924c7192a618dc228e6410c4823d2"} Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.223689 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5ebfdf91141d6c63139197603b7bc26896c924c7192a618dc228e6410c4823d2" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.849729 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4ctcm"] Nov 24 13:41:44 crc kubenswrapper[5039]: E1124 13:41:44.850557 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11fd95de-91de-400f-a931-ca7339de0a76" containerName="mariadb-database-create" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.850573 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="11fd95de-91de-400f-a931-ca7339de0a76" containerName="mariadb-database-create" Nov 24 13:41:44 crc kubenswrapper[5039]: E1124 13:41:44.850590 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a41f29ac-696c-4aa9-aed7-b8959e15fa52" containerName="mariadb-database-create" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.850599 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="a41f29ac-696c-4aa9-aed7-b8959e15fa52" containerName="mariadb-database-create" Nov 24 13:41:44 crc kubenswrapper[5039]: E1124 13:41:44.850627 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a18b1393-b27b-42f7-938d-cf3321f376d6" containerName="mariadb-account-create" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.850635 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="a18b1393-b27b-42f7-938d-cf3321f376d6" containerName="mariadb-account-create" Nov 24 13:41:44 crc kubenswrapper[5039]: E1124 13:41:44.850646 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b1111a8-04ac-478b-b1bf-557246566f05" containerName="mariadb-account-create" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.850655 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b1111a8-04ac-478b-b1bf-557246566f05" containerName="mariadb-account-create" Nov 24 13:41:44 crc kubenswrapper[5039]: E1124 13:41:44.850668 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fe0bf4f-b6f8-48c0-b772-587a715e6c27" containerName="neutron-httpd" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.850675 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fe0bf4f-b6f8-48c0-b772-587a715e6c27" containerName="neutron-httpd" Nov 24 13:41:44 crc kubenswrapper[5039]: E1124 13:41:44.850702 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6721127c-79a1-4fd5-98db-0e99ff78de0e" containerName="mariadb-account-create" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.850709 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="6721127c-79a1-4fd5-98db-0e99ff78de0e" containerName="mariadb-account-create" Nov 24 13:41:44 crc kubenswrapper[5039]: E1124 13:41:44.850723 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a73195a6-8449-41fa-ad7c-5ce086c264ec" containerName="mariadb-database-create" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.850731 5039 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="a73195a6-8449-41fa-ad7c-5ce086c264ec" containerName="mariadb-database-create" Nov 24 13:41:44 crc kubenswrapper[5039]: E1124 13:41:44.850745 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fe0bf4f-b6f8-48c0-b772-587a715e6c27" containerName="neutron-api" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.850754 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fe0bf4f-b6f8-48c0-b772-587a715e6c27" containerName="neutron-api" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.850973 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="6721127c-79a1-4fd5-98db-0e99ff78de0e" containerName="mariadb-account-create" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.850991 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="11fd95de-91de-400f-a931-ca7339de0a76" containerName="mariadb-database-create" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.851006 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="a41f29ac-696c-4aa9-aed7-b8959e15fa52" containerName="mariadb-database-create" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.851020 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fe0bf4f-b6f8-48c0-b772-587a715e6c27" containerName="neutron-httpd" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.851028 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="a18b1393-b27b-42f7-938d-cf3321f376d6" containerName="mariadb-account-create" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.851041 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fe0bf4f-b6f8-48c0-b772-587a715e6c27" containerName="neutron-api" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.851057 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b1111a8-04ac-478b-b1bf-557246566f05" containerName="mariadb-account-create" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.851068 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="a73195a6-8449-41fa-ad7c-5ce086c264ec" containerName="mariadb-database-create" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.852783 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4ctcm" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.874028 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4ctcm"] Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.910433 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7cdffa6-8fc4-44df-b52a-20f44c069d4d-catalog-content\") pod \"redhat-marketplace-4ctcm\" (UID: \"c7cdffa6-8fc4-44df-b52a-20f44c069d4d\") " pod="openshift-marketplace/redhat-marketplace-4ctcm" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.910607 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nh75f\" (UniqueName: \"kubernetes.io/projected/c7cdffa6-8fc4-44df-b52a-20f44c069d4d-kube-api-access-nh75f\") pod \"redhat-marketplace-4ctcm\" (UID: \"c7cdffa6-8fc4-44df-b52a-20f44c069d4d\") " pod="openshift-marketplace/redhat-marketplace-4ctcm" Nov 24 13:41:44 crc kubenswrapper[5039]: I1124 13:41:44.910685 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7cdffa6-8fc4-44df-b52a-20f44c069d4d-utilities\") pod \"redhat-marketplace-4ctcm\" (UID: \"c7cdffa6-8fc4-44df-b52a-20f44c069d4d\") " pod="openshift-marketplace/redhat-marketplace-4ctcm" Nov 24 13:41:45 crc kubenswrapper[5039]: I1124 13:41:45.013174 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7cdffa6-8fc4-44df-b52a-20f44c069d4d-catalog-content\") pod \"redhat-marketplace-4ctcm\" (UID: \"c7cdffa6-8fc4-44df-b52a-20f44c069d4d\") " pod="openshift-marketplace/redhat-marketplace-4ctcm" Nov 24 13:41:45 crc kubenswrapper[5039]: I1124 13:41:45.013339 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nh75f\" (UniqueName: \"kubernetes.io/projected/c7cdffa6-8fc4-44df-b52a-20f44c069d4d-kube-api-access-nh75f\") pod \"redhat-marketplace-4ctcm\" (UID: \"c7cdffa6-8fc4-44df-b52a-20f44c069d4d\") " pod="openshift-marketplace/redhat-marketplace-4ctcm" Nov 24 13:41:45 crc kubenswrapper[5039]: I1124 13:41:45.013436 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7cdffa6-8fc4-44df-b52a-20f44c069d4d-utilities\") pod \"redhat-marketplace-4ctcm\" (UID: \"c7cdffa6-8fc4-44df-b52a-20f44c069d4d\") " pod="openshift-marketplace/redhat-marketplace-4ctcm" Nov 24 13:41:45 crc kubenswrapper[5039]: I1124 13:41:45.013818 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7cdffa6-8fc4-44df-b52a-20f44c069d4d-catalog-content\") pod \"redhat-marketplace-4ctcm\" (UID: \"c7cdffa6-8fc4-44df-b52a-20f44c069d4d\") " pod="openshift-marketplace/redhat-marketplace-4ctcm" Nov 24 13:41:45 crc kubenswrapper[5039]: I1124 13:41:45.013938 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7cdffa6-8fc4-44df-b52a-20f44c069d4d-utilities\") pod \"redhat-marketplace-4ctcm\" (UID: \"c7cdffa6-8fc4-44df-b52a-20f44c069d4d\") " pod="openshift-marketplace/redhat-marketplace-4ctcm" Nov 24 13:41:45 crc kubenswrapper[5039]: I1124 13:41:45.051645 5039 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-nh75f\" (UniqueName: \"kubernetes.io/projected/c7cdffa6-8fc4-44df-b52a-20f44c069d4d-kube-api-access-nh75f\") pod \"redhat-marketplace-4ctcm\" (UID: \"c7cdffa6-8fc4-44df-b52a-20f44c069d4d\") " pod="openshift-marketplace/redhat-marketplace-4ctcm" Nov 24 13:41:45 crc kubenswrapper[5039]: I1124 13:41:45.173264 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4ctcm" Nov 24 13:41:45 crc kubenswrapper[5039]: I1124 13:41:45.307862 5039 scope.go:117] "RemoveContainer" containerID="251a8685a592e17b193cd81ef17ae5bc48f0554a7ee85b6b2c4faa3f1e0e1804" Nov 24 13:41:45 crc kubenswrapper[5039]: I1124 13:41:45.323346 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-5b66587b55-thzjl" Nov 24 13:41:45 crc kubenswrapper[5039]: I1124 13:41:45.847121 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4ctcm"] Nov 24 13:41:45 crc kubenswrapper[5039]: W1124 13:41:45.851999 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc7cdffa6_8fc4_44df_b52a_20f44c069d4d.slice/crio-21a218f3341e0d2211256f53758c1c19cc28e43ae1662eaa695743337a1808de WatchSource:0}: Error finding container 21a218f3341e0d2211256f53758c1c19cc28e43ae1662eaa695743337a1808de: Status 404 returned error can't find the container with id 21a218f3341e0d2211256f53758c1c19cc28e43ae1662eaa695743337a1808de Nov 24 13:41:46 crc kubenswrapper[5039]: I1124 13:41:46.260531 5039 generic.go:334] "Generic (PLEG): container finished" podID="291fbee7-ab14-439c-9bfc-845225d607ae" containerID="2eeb7af41a0131608cb59977d943a6f25b03d22c875272db1765e5385e1cab19" exitCode=1 Nov 24 13:41:46 crc kubenswrapper[5039]: I1124 13:41:46.260629 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-c9b799fcd-wrw68" event={"ID":"291fbee7-ab14-439c-9bfc-845225d607ae","Type":"ContainerDied","Data":"2eeb7af41a0131608cb59977d943a6f25b03d22c875272db1765e5385e1cab19"} Nov 24 13:41:46 crc kubenswrapper[5039]: I1124 13:41:46.260910 5039 scope.go:117] "RemoveContainer" containerID="251a8685a592e17b193cd81ef17ae5bc48f0554a7ee85b6b2c4faa3f1e0e1804" Nov 24 13:41:46 crc kubenswrapper[5039]: I1124 13:41:46.261678 5039 scope.go:117] "RemoveContainer" containerID="2eeb7af41a0131608cb59977d943a6f25b03d22c875272db1765e5385e1cab19" Nov 24 13:41:46 crc kubenswrapper[5039]: E1124 13:41:46.262007 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 20s restarting failed container=heat-cfnapi pod=heat-cfnapi-c9b799fcd-wrw68_openstack(291fbee7-ab14-439c-9bfc-845225d607ae)\"" pod="openstack/heat-cfnapi-c9b799fcd-wrw68" podUID="291fbee7-ab14-439c-9bfc-845225d607ae" Nov 24 13:41:46 crc kubenswrapper[5039]: I1124 13:41:46.268378 5039 generic.go:334] "Generic (PLEG): container finished" podID="c7cdffa6-8fc4-44df-b52a-20f44c069d4d" containerID="db989634c7eaab05ad07fabb57cce29c3483d8d86c9701e7004fa2cecd442672" exitCode=0 Nov 24 13:41:46 crc kubenswrapper[5039]: I1124 13:41:46.268429 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ctcm" event={"ID":"c7cdffa6-8fc4-44df-b52a-20f44c069d4d","Type":"ContainerDied","Data":"db989634c7eaab05ad07fabb57cce29c3483d8d86c9701e7004fa2cecd442672"} Nov 24 13:41:46 crc kubenswrapper[5039]: I1124 13:41:46.268456 5039 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ctcm" event={"ID":"c7cdffa6-8fc4-44df-b52a-20f44c069d4d","Type":"ContainerStarted","Data":"21a218f3341e0d2211256f53758c1c19cc28e43ae1662eaa695743337a1808de"} Nov 24 13:41:46 crc kubenswrapper[5039]: I1124 13:41:46.283273 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347","Type":"ContainerStarted","Data":"91bd8c255ae50cdf8a129746e8c813d20eb3b3f91634ce54fce3728b61e80606"} Nov 24 13:41:46 crc kubenswrapper[5039]: I1124 13:41:46.283425 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" containerName="ceilometer-central-agent" containerID="cri-o://7c95d3e1805f1234919c73ae83fca3a9f85403a43cb07e971fbc31116ab7dae9" gracePeriod=30 Nov 24 13:41:46 crc kubenswrapper[5039]: I1124 13:41:46.283565 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" containerName="proxy-httpd" containerID="cri-o://91bd8c255ae50cdf8a129746e8c813d20eb3b3f91634ce54fce3728b61e80606" gracePeriod=30 Nov 24 13:41:46 crc kubenswrapper[5039]: I1124 13:41:46.283600 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" containerName="sg-core" containerID="cri-o://f7b51fcf9a4ac54c159cc943b9ba66e512df214012f946a413a6ba4ee2c56827" gracePeriod=30 Nov 24 13:41:46 crc kubenswrapper[5039]: I1124 13:41:46.283629 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" containerName="ceilometer-notification-agent" containerID="cri-o://4fce2b5786bdf3d14448c9e215699a029e0ab5a89b68610ee853a47a0a5197b7" gracePeriod=30 Nov 24 13:41:46 crc kubenswrapper[5039]: I1124 13:41:46.295055 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-5b66587b55-thzjl" Nov 24 13:41:46 crc kubenswrapper[5039]: I1124 13:41:46.319127 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.090003523 podStartE2EDuration="7.319110806s" podCreationTimestamp="2025-11-24 13:41:39 +0000 UTC" firstStartedPulling="2025-11-24 13:41:40.89947845 +0000 UTC m=+1413.338602940" lastFinishedPulling="2025-11-24 13:41:45.128585723 +0000 UTC m=+1417.567710223" observedRunningTime="2025-11-24 13:41:46.315689861 +0000 UTC m=+1418.754814361" watchObservedRunningTime="2025-11-24 13:41:46.319110806 +0000 UTC m=+1418.758235296" Nov 24 13:41:46 crc kubenswrapper[5039]: I1124 13:41:46.507025 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-75cf4567b8-dlwgp" Nov 24 13:41:46 crc kubenswrapper[5039]: I1124 13:41:46.575718 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-78dc996954-b5s9v"] Nov 24 13:41:46 crc kubenswrapper[5039]: I1124 13:41:46.575924 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-78dc996954-b5s9v" podUID="8625b618-e756-46ad-a646-c94e824a1e83" containerName="heat-engine" containerID="cri-o://cc040e658a5e864f49b6a89bf502538beec554a4a79a4c2d235f2963492cdd7e" gracePeriod=60 Nov 24 13:41:46 crc kubenswrapper[5039]: I1124 13:41:46.662541 5039 kubelet.go:2542] "SyncLoop (probe)" 
probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-c9b799fcd-wrw68" Nov 24 13:41:46 crc kubenswrapper[5039]: I1124 13:41:46.662607 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-c9b799fcd-wrw68" Nov 24 13:41:47 crc kubenswrapper[5039]: I1124 13:41:47.294229 5039 scope.go:117] "RemoveContainer" containerID="2eeb7af41a0131608cb59977d943a6f25b03d22c875272db1765e5385e1cab19" Nov 24 13:41:47 crc kubenswrapper[5039]: E1124 13:41:47.294782 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 20s restarting failed container=heat-cfnapi pod=heat-cfnapi-c9b799fcd-wrw68_openstack(291fbee7-ab14-439c-9bfc-845225d607ae)\"" pod="openstack/heat-cfnapi-c9b799fcd-wrw68" podUID="291fbee7-ab14-439c-9bfc-845225d607ae" Nov 24 13:41:47 crc kubenswrapper[5039]: I1124 13:41:47.295430 5039 generic.go:334] "Generic (PLEG): container finished" podID="c7cdffa6-8fc4-44df-b52a-20f44c069d4d" containerID="bc5d7b9d1f08719a82a5fbdd51034af3029cd5ab07ae23600f01ebf340063980" exitCode=0 Nov 24 13:41:47 crc kubenswrapper[5039]: I1124 13:41:47.295527 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ctcm" event={"ID":"c7cdffa6-8fc4-44df-b52a-20f44c069d4d","Type":"ContainerDied","Data":"bc5d7b9d1f08719a82a5fbdd51034af3029cd5ab07ae23600f01ebf340063980"} Nov 24 13:41:47 crc kubenswrapper[5039]: I1124 13:41:47.299329 5039 generic.go:334] "Generic (PLEG): container finished" podID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" containerID="91bd8c255ae50cdf8a129746e8c813d20eb3b3f91634ce54fce3728b61e80606" exitCode=0 Nov 24 13:41:47 crc kubenswrapper[5039]: I1124 13:41:47.299350 5039 generic.go:334] "Generic (PLEG): container finished" podID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" containerID="f7b51fcf9a4ac54c159cc943b9ba66e512df214012f946a413a6ba4ee2c56827" exitCode=2 Nov 24 13:41:47 crc kubenswrapper[5039]: I1124 13:41:47.299359 5039 generic.go:334] "Generic (PLEG): container finished" podID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" containerID="4fce2b5786bdf3d14448c9e215699a029e0ab5a89b68610ee853a47a0a5197b7" exitCode=0 Nov 24 13:41:47 crc kubenswrapper[5039]: I1124 13:41:47.299409 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347","Type":"ContainerDied","Data":"91bd8c255ae50cdf8a129746e8c813d20eb3b3f91634ce54fce3728b61e80606"} Nov 24 13:41:47 crc kubenswrapper[5039]: I1124 13:41:47.299452 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347","Type":"ContainerDied","Data":"f7b51fcf9a4ac54c159cc943b9ba66e512df214012f946a413a6ba4ee2c56827"} Nov 24 13:41:47 crc kubenswrapper[5039]: I1124 13:41:47.299465 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347","Type":"ContainerDied","Data":"4fce2b5786bdf3d14448c9e215699a029e0ab5a89b68610ee853a47a0a5197b7"} Nov 24 13:41:48 crc kubenswrapper[5039]: E1124 13:41:48.000442 5039 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cc040e658a5e864f49b6a89bf502538beec554a4a79a4c2d235f2963492cdd7e" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 24 13:41:48 crc kubenswrapper[5039]: E1124 13:41:48.002186 5039 
log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cc040e658a5e864f49b6a89bf502538beec554a4a79a4c2d235f2963492cdd7e" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 24 13:41:48 crc kubenswrapper[5039]: E1124 13:41:48.003775 5039 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cc040e658a5e864f49b6a89bf502538beec554a4a79a4c2d235f2963492cdd7e" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 24 13:41:48 crc kubenswrapper[5039]: E1124 13:41:48.003807 5039 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-78dc996954-b5s9v" podUID="8625b618-e756-46ad-a646-c94e824a1e83" containerName="heat-engine" Nov 24 13:41:48 crc kubenswrapper[5039]: I1124 13:41:48.248753 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-58cdd4bdc9-hd6w5" podUID="8bb936ec-11da-428d-93ed-33745690864a" containerName="heat-api" probeResult="failure" output="Get \"http://10.217.0.199:8004/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 24 13:41:48 crc kubenswrapper[5039]: I1124 13:41:48.317670 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ctcm" event={"ID":"c7cdffa6-8fc4-44df-b52a-20f44c069d4d","Type":"ContainerStarted","Data":"95b4023bb5bcdade330eae161170db7cc6d3f30d59f5262423536395acbe2576"} Nov 24 13:41:48 crc kubenswrapper[5039]: I1124 13:41:48.345649 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4ctcm" podStartSLOduration=2.766575211 podStartE2EDuration="4.345624036s" podCreationTimestamp="2025-11-24 13:41:44 +0000 UTC" firstStartedPulling="2025-11-24 13:41:46.273359407 +0000 UTC m=+1418.712483897" lastFinishedPulling="2025-11-24 13:41:47.852408222 +0000 UTC m=+1420.291532722" observedRunningTime="2025-11-24 13:41:48.335915447 +0000 UTC m=+1420.775039977" watchObservedRunningTime="2025-11-24 13:41:48.345624036 +0000 UTC m=+1420.784748536" Nov 24 13:41:49 crc kubenswrapper[5039]: I1124 13:41:49.012029 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-qn652"] Nov 24 13:41:49 crc kubenswrapper[5039]: I1124 13:41:49.013793 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-qn652" Nov 24 13:41:49 crc kubenswrapper[5039]: I1124 13:41:49.015703 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 24 13:41:49 crc kubenswrapper[5039]: I1124 13:41:49.015715 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 24 13:41:49 crc kubenswrapper[5039]: I1124 13:41:49.016211 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-9nq8q" Nov 24 13:41:49 crc kubenswrapper[5039]: I1124 13:41:49.024307 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-qn652"] Nov 24 13:41:49 crc kubenswrapper[5039]: I1124 13:41:49.212066 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8s9qz\" (UniqueName: \"kubernetes.io/projected/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-kube-api-access-8s9qz\") pod \"nova-cell0-conductor-db-sync-qn652\" (UID: \"5a720981-59f4-4a6c-bc6a-ea08f5aa101b\") " pod="openstack/nova-cell0-conductor-db-sync-qn652" Nov 24 13:41:49 crc kubenswrapper[5039]: I1124 13:41:49.212189 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-qn652\" (UID: \"5a720981-59f4-4a6c-bc6a-ea08f5aa101b\") " pod="openstack/nova-cell0-conductor-db-sync-qn652" Nov 24 13:41:49 crc kubenswrapper[5039]: I1124 13:41:49.212228 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-config-data\") pod \"nova-cell0-conductor-db-sync-qn652\" (UID: \"5a720981-59f4-4a6c-bc6a-ea08f5aa101b\") " pod="openstack/nova-cell0-conductor-db-sync-qn652" Nov 24 13:41:49 crc kubenswrapper[5039]: I1124 13:41:49.212306 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-scripts\") pod \"nova-cell0-conductor-db-sync-qn652\" (UID: \"5a720981-59f4-4a6c-bc6a-ea08f5aa101b\") " pod="openstack/nova-cell0-conductor-db-sync-qn652" Nov 24 13:41:49 crc kubenswrapper[5039]: I1124 13:41:49.315112 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8s9qz\" (UniqueName: \"kubernetes.io/projected/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-kube-api-access-8s9qz\") pod \"nova-cell0-conductor-db-sync-qn652\" (UID: \"5a720981-59f4-4a6c-bc6a-ea08f5aa101b\") " pod="openstack/nova-cell0-conductor-db-sync-qn652" Nov 24 13:41:49 crc kubenswrapper[5039]: I1124 13:41:49.315252 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-qn652\" (UID: \"5a720981-59f4-4a6c-bc6a-ea08f5aa101b\") " pod="openstack/nova-cell0-conductor-db-sync-qn652" Nov 24 13:41:49 crc kubenswrapper[5039]: I1124 13:41:49.315292 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-config-data\") pod \"nova-cell0-conductor-db-sync-qn652\" 
(UID: \"5a720981-59f4-4a6c-bc6a-ea08f5aa101b\") " pod="openstack/nova-cell0-conductor-db-sync-qn652" Nov 24 13:41:49 crc kubenswrapper[5039]: I1124 13:41:49.315372 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-scripts\") pod \"nova-cell0-conductor-db-sync-qn652\" (UID: \"5a720981-59f4-4a6c-bc6a-ea08f5aa101b\") " pod="openstack/nova-cell0-conductor-db-sync-qn652" Nov 24 13:41:49 crc kubenswrapper[5039]: I1124 13:41:49.322572 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-scripts\") pod \"nova-cell0-conductor-db-sync-qn652\" (UID: \"5a720981-59f4-4a6c-bc6a-ea08f5aa101b\") " pod="openstack/nova-cell0-conductor-db-sync-qn652" Nov 24 13:41:49 crc kubenswrapper[5039]: I1124 13:41:49.324493 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-qn652\" (UID: \"5a720981-59f4-4a6c-bc6a-ea08f5aa101b\") " pod="openstack/nova-cell0-conductor-db-sync-qn652" Nov 24 13:41:49 crc kubenswrapper[5039]: I1124 13:41:49.331675 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-config-data\") pod \"nova-cell0-conductor-db-sync-qn652\" (UID: \"5a720981-59f4-4a6c-bc6a-ea08f5aa101b\") " pod="openstack/nova-cell0-conductor-db-sync-qn652" Nov 24 13:41:49 crc kubenswrapper[5039]: I1124 13:41:49.341954 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8s9qz\" (UniqueName: \"kubernetes.io/projected/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-kube-api-access-8s9qz\") pod \"nova-cell0-conductor-db-sync-qn652\" (UID: \"5a720981-59f4-4a6c-bc6a-ea08f5aa101b\") " pod="openstack/nova-cell0-conductor-db-sync-qn652" Nov 24 13:41:49 crc kubenswrapper[5039]: I1124 13:41:49.343777 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-qn652" Nov 24 13:41:49 crc kubenswrapper[5039]: I1124 13:41:49.885020 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-qn652"] Nov 24 13:41:49 crc kubenswrapper[5039]: W1124 13:41:49.886054 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5a720981_59f4_4a6c_bc6a_ea08f5aa101b.slice/crio-1c6ee46f8f6b152253e81948b1655dc106dbd1997179ba9c9ae16f95b5889a6a WatchSource:0}: Error finding container 1c6ee46f8f6b152253e81948b1655dc106dbd1997179ba9c9ae16f95b5889a6a: Status 404 returned error can't find the container with id 1c6ee46f8f6b152253e81948b1655dc106dbd1997179ba9c9ae16f95b5889a6a Nov 24 13:41:50 crc kubenswrapper[5039]: I1124 13:41:50.338838 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-qn652" event={"ID":"5a720981-59f4-4a6c-bc6a-ea08f5aa101b","Type":"ContainerStarted","Data":"1c6ee46f8f6b152253e81948b1655dc106dbd1997179ba9c9ae16f95b5889a6a"} Nov 24 13:41:51 crc kubenswrapper[5039]: I1124 13:41:51.227428 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-t8k26"] Nov 24 13:41:51 crc kubenswrapper[5039]: I1124 13:41:51.229628 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t8k26" Nov 24 13:41:51 crc kubenswrapper[5039]: I1124 13:41:51.247738 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-t8k26"] Nov 24 13:41:51 crc kubenswrapper[5039]: I1124 13:41:51.371055 5039 generic.go:334] "Generic (PLEG): container finished" podID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" containerID="7c95d3e1805f1234919c73ae83fca3a9f85403a43cb07e971fbc31116ab7dae9" exitCode=0 Nov 24 13:41:51 crc kubenswrapper[5039]: I1124 13:41:51.371362 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347","Type":"ContainerDied","Data":"7c95d3e1805f1234919c73ae83fca3a9f85403a43cb07e971fbc31116ab7dae9"} Nov 24 13:41:51 crc kubenswrapper[5039]: I1124 13:41:51.372532 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9a81aab-1978-4938-84f9-3a5511942ecb-catalog-content\") pod \"certified-operators-t8k26\" (UID: \"b9a81aab-1978-4938-84f9-3a5511942ecb\") " pod="openshift-marketplace/certified-operators-t8k26" Nov 24 13:41:51 crc kubenswrapper[5039]: I1124 13:41:51.372703 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9a81aab-1978-4938-84f9-3a5511942ecb-utilities\") pod \"certified-operators-t8k26\" (UID: \"b9a81aab-1978-4938-84f9-3a5511942ecb\") " pod="openshift-marketplace/certified-operators-t8k26" Nov 24 13:41:51 crc kubenswrapper[5039]: I1124 13:41:51.372749 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6grcg\" (UniqueName: \"kubernetes.io/projected/b9a81aab-1978-4938-84f9-3a5511942ecb-kube-api-access-6grcg\") pod \"certified-operators-t8k26\" (UID: \"b9a81aab-1978-4938-84f9-3a5511942ecb\") " pod="openshift-marketplace/certified-operators-t8k26" Nov 24 13:41:51 crc kubenswrapper[5039]: I1124 13:41:51.474224 5039 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9a81aab-1978-4938-84f9-3a5511942ecb-utilities\") pod \"certified-operators-t8k26\" (UID: \"b9a81aab-1978-4938-84f9-3a5511942ecb\") " pod="openshift-marketplace/certified-operators-t8k26" Nov 24 13:41:51 crc kubenswrapper[5039]: I1124 13:41:51.474308 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6grcg\" (UniqueName: \"kubernetes.io/projected/b9a81aab-1978-4938-84f9-3a5511942ecb-kube-api-access-6grcg\") pod \"certified-operators-t8k26\" (UID: \"b9a81aab-1978-4938-84f9-3a5511942ecb\") " pod="openshift-marketplace/certified-operators-t8k26" Nov 24 13:41:51 crc kubenswrapper[5039]: I1124 13:41:51.474458 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9a81aab-1978-4938-84f9-3a5511942ecb-catalog-content\") pod \"certified-operators-t8k26\" (UID: \"b9a81aab-1978-4938-84f9-3a5511942ecb\") " pod="openshift-marketplace/certified-operators-t8k26" Nov 24 13:41:51 crc kubenswrapper[5039]: I1124 13:41:51.476523 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9a81aab-1978-4938-84f9-3a5511942ecb-utilities\") pod \"certified-operators-t8k26\" (UID: \"b9a81aab-1978-4938-84f9-3a5511942ecb\") " pod="openshift-marketplace/certified-operators-t8k26" Nov 24 13:41:51 crc kubenswrapper[5039]: I1124 13:41:51.478280 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9a81aab-1978-4938-84f9-3a5511942ecb-catalog-content\") pod \"certified-operators-t8k26\" (UID: \"b9a81aab-1978-4938-84f9-3a5511942ecb\") " pod="openshift-marketplace/certified-operators-t8k26" Nov 24 13:41:51 crc kubenswrapper[5039]: I1124 13:41:51.505033 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6grcg\" (UniqueName: \"kubernetes.io/projected/b9a81aab-1978-4938-84f9-3a5511942ecb-kube-api-access-6grcg\") pod \"certified-operators-t8k26\" (UID: \"b9a81aab-1978-4938-84f9-3a5511942ecb\") " pod="openshift-marketplace/certified-operators-t8k26" Nov 24 13:41:51 crc kubenswrapper[5039]: I1124 13:41:51.552107 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t8k26" Nov 24 13:41:51 crc kubenswrapper[5039]: I1124 13:41:51.963312 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-7f45f46b76-fckwv" Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.042651 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-c9b799fcd-wrw68"] Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.227941 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-78c587fb4d-fl6qr" Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.349682 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-86b5f44b95-4qtzc"] Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.441071 5039 util.go:48] "No ready sandbox for pod can be found. 
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.457399 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347","Type":"ContainerDied","Data":"6a5fcf5d33bb9f71c923f847f121a8560c7103f360f3c00560156e0f457c34fd"}
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.457461 5039 scope.go:117] "RemoveContainer" containerID="91bd8c255ae50cdf8a129746e8c813d20eb3b3f91634ce54fce3728b61e80606"
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.503671 5039 scope.go:117] "RemoveContainer" containerID="f7b51fcf9a4ac54c159cc943b9ba66e512df214012f946a413a6ba4ee2c56827"
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.514663 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-config-data\") pod \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") "
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.514784 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-log-httpd\") pod \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") "
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.515694 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" (UID: "ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.516850 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" (UID: "ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.514818 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-run-httpd\") pod \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") "
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.517033 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-sg-core-conf-yaml\") pod \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") "
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.517094 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-combined-ca-bundle\") pod \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") "
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.517143 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-scripts\") pod \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") "
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.517214 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fbmbb\" (UniqueName: \"kubernetes.io/projected/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-kube-api-access-fbmbb\") pod \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\" (UID: \"ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347\") "
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.529965 5039 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.530002 5039 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.536550 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-scripts" (OuterVolumeSpecName: "scripts") pod "ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" (UID: "ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.564684 5039 scope.go:117] "RemoveContainer" containerID="4fce2b5786bdf3d14448c9e215699a029e0ab5a89b68610ee853a47a0a5197b7"
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.564834 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-kube-api-access-fbmbb" (OuterVolumeSpecName: "kube-api-access-fbmbb") pod "ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" (UID: "ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347"). InnerVolumeSpecName "kube-api-access-fbmbb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.634240 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.634588 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fbmbb\" (UniqueName: \"kubernetes.io/projected/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-kube-api-access-fbmbb\") on node \"crc\" DevicePath \"\""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.676660 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" (UID: "ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.737090 5039 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.748195 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-c9b799fcd-wrw68"
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.749709 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-config-data" (OuterVolumeSpecName: "config-data") pod "ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" (UID: "ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.772479 5039 scope.go:117] "RemoveContainer" containerID="7c95d3e1805f1234919c73ae83fca3a9f85403a43cb07e971fbc31116ab7dae9"
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.780612 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" (UID: "ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.806995 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-86b5f44b95-4qtzc"
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.838051 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09227c0c-ba64-4216-8bac-a8c0f88706c3-config-data\") pod \"09227c0c-ba64-4216-8bac-a8c0f88706c3\" (UID: \"09227c0c-ba64-4216-8bac-a8c0f88706c3\") "
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.838190 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xpnfl\" (UniqueName: \"kubernetes.io/projected/09227c0c-ba64-4216-8bac-a8c0f88706c3-kube-api-access-xpnfl\") pod \"09227c0c-ba64-4216-8bac-a8c0f88706c3\" (UID: \"09227c0c-ba64-4216-8bac-a8c0f88706c3\") "
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.838275 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qhchp\" (UniqueName: \"kubernetes.io/projected/291fbee7-ab14-439c-9bfc-845225d607ae-kube-api-access-qhchp\") pod \"291fbee7-ab14-439c-9bfc-845225d607ae\" (UID: \"291fbee7-ab14-439c-9bfc-845225d607ae\") "
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.838413 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09227c0c-ba64-4216-8bac-a8c0f88706c3-config-data-custom\") pod \"09227c0c-ba64-4216-8bac-a8c0f88706c3\" (UID: \"09227c0c-ba64-4216-8bac-a8c0f88706c3\") "
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.838521 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/291fbee7-ab14-439c-9bfc-845225d607ae-config-data\") pod \"291fbee7-ab14-439c-9bfc-845225d607ae\" (UID: \"291fbee7-ab14-439c-9bfc-845225d607ae\") "
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.839033 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/291fbee7-ab14-439c-9bfc-845225d607ae-combined-ca-bundle\") pod \"291fbee7-ab14-439c-9bfc-845225d607ae\" (UID: \"291fbee7-ab14-439c-9bfc-845225d607ae\") "
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.839089 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/291fbee7-ab14-439c-9bfc-845225d607ae-config-data-custom\") pod \"291fbee7-ab14-439c-9bfc-845225d607ae\" (UID: \"291fbee7-ab14-439c-9bfc-845225d607ae\") "
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.839533 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09227c0c-ba64-4216-8bac-a8c0f88706c3-combined-ca-bundle\") pod \"09227c0c-ba64-4216-8bac-a8c0f88706c3\" (UID: \"09227c0c-ba64-4216-8bac-a8c0f88706c3\") "
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.840971 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.841090 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.842180 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09227c0c-ba64-4216-8bac-a8c0f88706c3-kube-api-access-xpnfl" (OuterVolumeSpecName: "kube-api-access-xpnfl") pod "09227c0c-ba64-4216-8bac-a8c0f88706c3" (UID: "09227c0c-ba64-4216-8bac-a8c0f88706c3"). InnerVolumeSpecName "kube-api-access-xpnfl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.842576 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/291fbee7-ab14-439c-9bfc-845225d607ae-kube-api-access-qhchp" (OuterVolumeSpecName: "kube-api-access-qhchp") pod "291fbee7-ab14-439c-9bfc-845225d607ae" (UID: "291fbee7-ab14-439c-9bfc-845225d607ae"). InnerVolumeSpecName "kube-api-access-qhchp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.844899 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/291fbee7-ab14-439c-9bfc-845225d607ae-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "291fbee7-ab14-439c-9bfc-845225d607ae" (UID: "291fbee7-ab14-439c-9bfc-845225d607ae"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.849176 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09227c0c-ba64-4216-8bac-a8c0f88706c3-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "09227c0c-ba64-4216-8bac-a8c0f88706c3" (UID: "09227c0c-ba64-4216-8bac-a8c0f88706c3"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.885812 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/291fbee7-ab14-439c-9bfc-845225d607ae-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "291fbee7-ab14-439c-9bfc-845225d607ae" (UID: "291fbee7-ab14-439c-9bfc-845225d607ae"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.897160 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09227c0c-ba64-4216-8bac-a8c0f88706c3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "09227c0c-ba64-4216-8bac-a8c0f88706c3" (UID: "09227c0c-ba64-4216-8bac-a8c0f88706c3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.907694 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09227c0c-ba64-4216-8bac-a8c0f88706c3-config-data" (OuterVolumeSpecName: "config-data") pod "09227c0c-ba64-4216-8bac-a8c0f88706c3" (UID: "09227c0c-ba64-4216-8bac-a8c0f88706c3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.926676 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/291fbee7-ab14-439c-9bfc-845225d607ae-config-data" (OuterVolumeSpecName: "config-data") pod "291fbee7-ab14-439c-9bfc-845225d607ae" (UID: "291fbee7-ab14-439c-9bfc-845225d607ae"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.943101 5039 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/291fbee7-ab14-439c-9bfc-845225d607ae-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.943139 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09227c0c-ba64-4216-8bac-a8c0f88706c3-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.943152 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09227c0c-ba64-4216-8bac-a8c0f88706c3-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.943165 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xpnfl\" (UniqueName: \"kubernetes.io/projected/09227c0c-ba64-4216-8bac-a8c0f88706c3-kube-api-access-xpnfl\") on node \"crc\" DevicePath \"\""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.943178 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qhchp\" (UniqueName: \"kubernetes.io/projected/291fbee7-ab14-439c-9bfc-845225d607ae-kube-api-access-qhchp\") on node \"crc\" DevicePath \"\""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.943193 5039 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09227c0c-ba64-4216-8bac-a8c0f88706c3-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.943204 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/291fbee7-ab14-439c-9bfc-845225d607ae-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 13:41:52 crc kubenswrapper[5039]: I1124 13:41:52.943214 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/291fbee7-ab14-439c-9bfc-845225d607ae-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.116981 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-t8k26"]
Nov 24 13:41:53 crc kubenswrapper[5039]: W1124 13:41:53.119883 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb9a81aab_1978_4938_84f9_3a5511942ecb.slice/crio-c5d276de16f8aa116c65efccc77b15c1b251c58e8359c612cb58abeda15b29c1 WatchSource:0}: Error finding container c5d276de16f8aa116c65efccc77b15c1b251c58e8359c612cb58abeda15b29c1: Status 404 returned error can't find the container with id c5d276de16f8aa116c65efccc77b15c1b251c58e8359c612cb58abeda15b29c1
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.468853 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-c9b799fcd-wrw68"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.468853 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-c9b799fcd-wrw68" event={"ID":"291fbee7-ab14-439c-9bfc-845225d607ae","Type":"ContainerDied","Data":"f21bd9b13a51c06675c9e1b18ca0329c9529bd744822f7998a4beb5704d2c174"}
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.469273 5039 scope.go:117] "RemoveContainer" containerID="2eeb7af41a0131608cb59977d943a6f25b03d22c875272db1765e5385e1cab19"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.473261 5039 generic.go:334] "Generic (PLEG): container finished" podID="b9a81aab-1978-4938-84f9-3a5511942ecb" containerID="60e5d4c578a56b52977bedad21f1c3c88e964cc1979fac5f1ff10b23aebf98dc" exitCode=0
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.473335 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t8k26" event={"ID":"b9a81aab-1978-4938-84f9-3a5511942ecb","Type":"ContainerDied","Data":"60e5d4c578a56b52977bedad21f1c3c88e964cc1979fac5f1ff10b23aebf98dc"}
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.473357 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t8k26" event={"ID":"b9a81aab-1978-4938-84f9-3a5511942ecb","Type":"ContainerStarted","Data":"c5d276de16f8aa116c65efccc77b15c1b251c58e8359c612cb58abeda15b29c1"}
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.482857 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-86b5f44b95-4qtzc" event={"ID":"09227c0c-ba64-4216-8bac-a8c0f88706c3","Type":"ContainerDied","Data":"802251b6c48a07463bfd79ba34e64a026619afda0553e2cdcc723cc67c2ef2e3"}
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.482937 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-86b5f44b95-4qtzc"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.491291 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.523997 5039 scope.go:117] "RemoveContainer" containerID="b9305e61a2db1af7e2f870371d4363db7cdebd32f06e05c0c2227539dbfa2707"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.654520 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.677178 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.688317 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-c9b799fcd-wrw68"]
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.709204 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-c9b799fcd-wrw68"]
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.737737 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-86b5f44b95-4qtzc"]
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.755566 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-86b5f44b95-4qtzc"]
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.768001 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 24 13:41:53 crc kubenswrapper[5039]: E1124 13:41:53.768535 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="291fbee7-ab14-439c-9bfc-845225d607ae" containerName="heat-cfnapi"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.768550 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="291fbee7-ab14-439c-9bfc-845225d607ae" containerName="heat-cfnapi"
Nov 24 13:41:53 crc kubenswrapper[5039]: E1124 13:41:53.768566 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="291fbee7-ab14-439c-9bfc-845225d607ae" containerName="heat-cfnapi"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.768573 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="291fbee7-ab14-439c-9bfc-845225d607ae" containerName="heat-cfnapi"
Nov 24 13:41:53 crc kubenswrapper[5039]: E1124 13:41:53.768583 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" containerName="ceilometer-notification-agent"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.768590 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" containerName="ceilometer-notification-agent"
Nov 24 13:41:53 crc kubenswrapper[5039]: E1124 13:41:53.768611 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09227c0c-ba64-4216-8bac-a8c0f88706c3" containerName="heat-api"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.768619 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="09227c0c-ba64-4216-8bac-a8c0f88706c3" containerName="heat-api"
Nov 24 13:41:53 crc kubenswrapper[5039]: E1124 13:41:53.768630 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" containerName="proxy-httpd"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.768637 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" containerName="proxy-httpd"
Nov 24 13:41:53 crc kubenswrapper[5039]: E1124 13:41:53.768651 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09227c0c-ba64-4216-8bac-a8c0f88706c3" containerName="heat-api"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.768658 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="09227c0c-ba64-4216-8bac-a8c0f88706c3" containerName="heat-api"
Nov 24 13:41:53 crc kubenswrapper[5039]: E1124 13:41:53.768674 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" containerName="sg-core"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.768681 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" containerName="sg-core"
Nov 24 13:41:53 crc kubenswrapper[5039]: E1124 13:41:53.768712 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" containerName="ceilometer-central-agent"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.768719 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" containerName="ceilometer-central-agent"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.768958 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="09227c0c-ba64-4216-8bac-a8c0f88706c3" containerName="heat-api"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.768973 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" containerName="ceilometer-central-agent"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.768998 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" containerName="ceilometer-notification-agent"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.769010 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="291fbee7-ab14-439c-9bfc-845225d607ae" containerName="heat-cfnapi"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.769019 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="291fbee7-ab14-439c-9bfc-845225d607ae" containerName="heat-cfnapi"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.769036 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" containerName="sg-core"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.769053 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" containerName="proxy-httpd"
Nov 24 13:41:53 crc kubenswrapper[5039]: E1124 13:41:53.769274 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="291fbee7-ab14-439c-9bfc-845225d607ae" containerName="heat-cfnapi"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.769285 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="291fbee7-ab14-439c-9bfc-845225d607ae" containerName="heat-cfnapi"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.769600 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="09227c0c-ba64-4216-8bac-a8c0f88706c3" containerName="heat-api"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.769621 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="291fbee7-ab14-439c-9bfc-845225d607ae" containerName="heat-cfnapi"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.775306 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.775605 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.779980 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.780198 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.887770 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0de84706-a2dc-4680-83ba-324f8b41e3b0-run-httpd\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.888079 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.888146 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-config-data\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.888168 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.888183 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-scripts\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.888204 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0de84706-a2dc-4680-83ba-324f8b41e3b0-log-httpd\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.888232 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5t2n\" (UniqueName: \"kubernetes.io/projected/0de84706-a2dc-4680-83ba-324f8b41e3b0-kube-api-access-l5t2n\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.990216 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.990320 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-config-data\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.990353 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.990378 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-scripts\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.990408 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0de84706-a2dc-4680-83ba-324f8b41e3b0-log-httpd\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.990442 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5t2n\" (UniqueName: \"kubernetes.io/projected/0de84706-a2dc-4680-83ba-324f8b41e3b0-kube-api-access-l5t2n\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.990589 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0de84706-a2dc-4680-83ba-324f8b41e3b0-run-httpd\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.991135 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0de84706-a2dc-4680-83ba-324f8b41e3b0-run-httpd\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.991402 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0de84706-a2dc-4680-83ba-324f8b41e3b0-log-httpd\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.998190 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.998477 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-scripts\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.998811 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:53 crc kubenswrapper[5039]: I1124 13:41:53.999365 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-config-data\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:54 crc kubenswrapper[5039]: I1124 13:41:54.023905 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5t2n\" (UniqueName: \"kubernetes.io/projected/0de84706-a2dc-4680-83ba-324f8b41e3b0-kube-api-access-l5t2n\") pod \"ceilometer-0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " pod="openstack/ceilometer-0"
Nov 24 13:41:54 crc kubenswrapper[5039]: I1124 13:41:54.155178 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 24 13:41:54 crc kubenswrapper[5039]: I1124 13:41:54.328912 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09227c0c-ba64-4216-8bac-a8c0f88706c3" path="/var/lib/kubelet/pods/09227c0c-ba64-4216-8bac-a8c0f88706c3/volumes"
Nov 24 13:41:54 crc kubenswrapper[5039]: I1124 13:41:54.329895 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="291fbee7-ab14-439c-9bfc-845225d607ae" path="/var/lib/kubelet/pods/291fbee7-ab14-439c-9bfc-845225d607ae/volumes"
Nov 24 13:41:54 crc kubenswrapper[5039]: I1124 13:41:54.330549 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347" path="/var/lib/kubelet/pods/ec6d7ba5-d19b-4aa5-b2bd-f5b66aba3347/volumes"
Nov 24 13:41:54 crc kubenswrapper[5039]: I1124 13:41:54.512812 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t8k26" event={"ID":"b9a81aab-1978-4938-84f9-3a5511942ecb","Type":"ContainerStarted","Data":"006f0d90187e613602c5a4fd67e7298c66ddda83ed0f96974f35db44b9d4e266"}
Nov 24 13:41:54 crc kubenswrapper[5039]: I1124 13:41:54.626199 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 24 13:41:55 crc kubenswrapper[5039]: I1124 13:41:55.174934 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4ctcm"
Nov 24 13:41:55 crc kubenswrapper[5039]: I1124 13:41:55.174980 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4ctcm"
Nov 24 13:41:55 crc kubenswrapper[5039]: I1124 13:41:55.242271 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4ctcm"
Nov 24 13:41:55 crc kubenswrapper[5039]: I1124 13:41:55.385782 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 24 13:41:55 crc kubenswrapper[5039]: I1124 13:41:55.554119 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0de84706-a2dc-4680-83ba-324f8b41e3b0","Type":"ContainerStarted","Data":"283efecfb93cb8d465cab3a40805d1c1c324d939e1ae4f88af2ac476cdfe9c0a"}
Nov 24 13:41:55 crc kubenswrapper[5039]: I1124 13:41:55.572801 5039 generic.go:334] "Generic (PLEG): container finished" podID="b9a81aab-1978-4938-84f9-3a5511942ecb" containerID="006f0d90187e613602c5a4fd67e7298c66ddda83ed0f96974f35db44b9d4e266" exitCode=0
Nov 24 13:41:55 crc kubenswrapper[5039]: I1124 13:41:55.574874 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t8k26" event={"ID":"b9a81aab-1978-4938-84f9-3a5511942ecb","Type":"ContainerDied","Data":"006f0d90187e613602c5a4fd67e7298c66ddda83ed0f96974f35db44b9d4e266"}
Nov 24 13:41:55 crc kubenswrapper[5039]: I1124 13:41:55.667745 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4ctcm"
Nov 24 13:41:56 crc kubenswrapper[5039]: I1124 13:41:56.585517 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0de84706-a2dc-4680-83ba-324f8b41e3b0","Type":"ContainerStarted","Data":"608c639a441065c5f92ad5328f1c52c10b4669e125f9523e6c9d96ccd9a4ff99"}
Nov 24 13:41:57 crc kubenswrapper[5039]: I1124 13:41:57.613460 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4ctcm"]
Nov 24 13:41:57 crc kubenswrapper[5039]: I1124 13:41:57.613977 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4ctcm" podUID="c7cdffa6-8fc4-44df-b52a-20f44c069d4d" containerName="registry-server" containerID="cri-o://95b4023bb5bcdade330eae161170db7cc6d3f30d59f5262423536395acbe2576" gracePeriod=2
Nov 24 13:41:58 crc kubenswrapper[5039]: E1124 13:41:58.002245 5039 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cc040e658a5e864f49b6a89bf502538beec554a4a79a4c2d235f2963492cdd7e" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Nov 24 13:41:58 crc kubenswrapper[5039]: E1124 13:41:58.004254 5039 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cc040e658a5e864f49b6a89bf502538beec554a4a79a4c2d235f2963492cdd7e" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Nov 24 13:41:58 crc kubenswrapper[5039]: E1124 13:41:58.005553 5039 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cc040e658a5e864f49b6a89bf502538beec554a4a79a4c2d235f2963492cdd7e" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Nov 24 13:41:58 crc kubenswrapper[5039]: E1124 13:41:58.005616 5039 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-78dc996954-b5s9v" podUID="8625b618-e756-46ad-a646-c94e824a1e83" containerName="heat-engine"
Nov 24 13:41:58 crc kubenswrapper[5039]: I1124 13:41:58.607863 5039 generic.go:334] "Generic (PLEG): container finished" podID="8625b618-e756-46ad-a646-c94e824a1e83" containerID="cc040e658a5e864f49b6a89bf502538beec554a4a79a4c2d235f2963492cdd7e" exitCode=0
Nov 24 13:41:58 crc kubenswrapper[5039]: I1124 13:41:58.607933 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-78dc996954-b5s9v" event={"ID":"8625b618-e756-46ad-a646-c94e824a1e83","Type":"ContainerDied","Data":"cc040e658a5e864f49b6a89bf502538beec554a4a79a4c2d235f2963492cdd7e"}
Nov 24 13:41:58 crc kubenswrapper[5039]: I1124 13:41:58.610200 5039 generic.go:334] "Generic (PLEG): container finished" podID="c7cdffa6-8fc4-44df-b52a-20f44c069d4d" containerID="95b4023bb5bcdade330eae161170db7cc6d3f30d59f5262423536395acbe2576" exitCode=0
Nov 24 13:41:58 crc kubenswrapper[5039]: I1124 13:41:58.610261 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ctcm" event={"ID":"c7cdffa6-8fc4-44df-b52a-20f44c069d4d","Type":"ContainerDied","Data":"95b4023bb5bcdade330eae161170db7cc6d3f30d59f5262423536395acbe2576"}
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.511541 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4ctcm"
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.607130 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-78dc996954-b5s9v"
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.625274 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7cdffa6-8fc4-44df-b52a-20f44c069d4d-utilities\") pod \"c7cdffa6-8fc4-44df-b52a-20f44c069d4d\" (UID: \"c7cdffa6-8fc4-44df-b52a-20f44c069d4d\") "
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.625543 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nh75f\" (UniqueName: \"kubernetes.io/projected/c7cdffa6-8fc4-44df-b52a-20f44c069d4d-kube-api-access-nh75f\") pod \"c7cdffa6-8fc4-44df-b52a-20f44c069d4d\" (UID: \"c7cdffa6-8fc4-44df-b52a-20f44c069d4d\") "
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.625578 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7cdffa6-8fc4-44df-b52a-20f44c069d4d-catalog-content\") pod \"c7cdffa6-8fc4-44df-b52a-20f44c069d4d\" (UID: \"c7cdffa6-8fc4-44df-b52a-20f44c069d4d\") "
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.627475 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7cdffa6-8fc4-44df-b52a-20f44c069d4d-utilities" (OuterVolumeSpecName: "utilities") pod "c7cdffa6-8fc4-44df-b52a-20f44c069d4d" (UID: "c7cdffa6-8fc4-44df-b52a-20f44c069d4d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.649576 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7cdffa6-8fc4-44df-b52a-20f44c069d4d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c7cdffa6-8fc4-44df-b52a-20f44c069d4d" (UID: "c7cdffa6-8fc4-44df-b52a-20f44c069d4d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.662631 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7cdffa6-8fc4-44df-b52a-20f44c069d4d-kube-api-access-nh75f" (OuterVolumeSpecName: "kube-api-access-nh75f") pod "c7cdffa6-8fc4-44df-b52a-20f44c069d4d" (UID: "c7cdffa6-8fc4-44df-b52a-20f44c069d4d"). InnerVolumeSpecName "kube-api-access-nh75f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.668405 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-qn652" event={"ID":"5a720981-59f4-4a6c-bc6a-ea08f5aa101b","Type":"ContainerStarted","Data":"0bc29d7ec77e50a067302d1c7c1db679819a0c41054ca499b314238e31599ff7"}
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.676219 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t8k26" event={"ID":"b9a81aab-1978-4938-84f9-3a5511942ecb","Type":"ContainerStarted","Data":"40ebf768cb21e9ca3ed53e2047726477d380fd74584603614b82c8e7c9a9fc0b"}
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.693200 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ctcm" event={"ID":"c7cdffa6-8fc4-44df-b52a-20f44c069d4d","Type":"ContainerDied","Data":"21a218f3341e0d2211256f53758c1c19cc28e43ae1662eaa695743337a1808de"}
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.693257 5039 scope.go:117] "RemoveContainer" containerID="95b4023bb5bcdade330eae161170db7cc6d3f30d59f5262423536395acbe2576"
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.693387 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4ctcm"
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.695277 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-qn652" podStartSLOduration=2.347005976 podStartE2EDuration="15.695264384s" podCreationTimestamp="2025-11-24 13:41:48 +0000 UTC" firstStartedPulling="2025-11-24 13:41:49.888310894 +0000 UTC m=+1422.327435394" lastFinishedPulling="2025-11-24 13:42:03.236569302 +0000 UTC m=+1435.675693802" observedRunningTime="2025-11-24 13:42:03.684203021 +0000 UTC m=+1436.123327541" watchObservedRunningTime="2025-11-24 13:42:03.695264384 +0000 UTC m=+1436.134388884"
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.702137 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-78dc996954-b5s9v" event={"ID":"8625b618-e756-46ad-a646-c94e824a1e83","Type":"ContainerDied","Data":"f3e0f8e511fe49088ea2841775ae09bcbb861af077f09a9e4127f93d23ed74e3"}
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.702156 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-78dc996954-b5s9v"
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.712276 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0de84706-a2dc-4680-83ba-324f8b41e3b0","Type":"ContainerStarted","Data":"e15358e1677425bf1663a74aef6bd160b643b24c545b67b96a396a0234623c2f"}
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.715841 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-t8k26" podStartSLOduration=2.964406014 podStartE2EDuration="12.715824241s" podCreationTimestamp="2025-11-24 13:41:51 +0000 UTC" firstStartedPulling="2025-11-24 13:41:53.477842315 +0000 UTC m=+1425.916966815" lastFinishedPulling="2025-11-24 13:42:03.229260542 +0000 UTC m=+1435.668385042" observedRunningTime="2025-11-24 13:42:03.702090412 +0000 UTC m=+1436.141214922" watchObservedRunningTime="2025-11-24 13:42:03.715824241 +0000 UTC m=+1436.154948741"
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.727746 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8625b618-e756-46ad-a646-c94e824a1e83-config-data-custom\") pod \"8625b618-e756-46ad-a646-c94e824a1e83\" (UID: \"8625b618-e756-46ad-a646-c94e824a1e83\") "
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.727863 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dcx8t\" (UniqueName: \"kubernetes.io/projected/8625b618-e756-46ad-a646-c94e824a1e83-kube-api-access-dcx8t\") pod \"8625b618-e756-46ad-a646-c94e824a1e83\" (UID: \"8625b618-e756-46ad-a646-c94e824a1e83\") "
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.727933 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8625b618-e756-46ad-a646-c94e824a1e83-config-data\") pod \"8625b618-e756-46ad-a646-c94e824a1e83\" (UID: \"8625b618-e756-46ad-a646-c94e824a1e83\") "
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.728024 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8625b618-e756-46ad-a646-c94e824a1e83-combined-ca-bundle\") pod \"8625b618-e756-46ad-a646-c94e824a1e83\" (UID: \"8625b618-e756-46ad-a646-c94e824a1e83\") "
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.728697 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nh75f\" (UniqueName: \"kubernetes.io/projected/c7cdffa6-8fc4-44df-b52a-20f44c069d4d-kube-api-access-nh75f\") on node \"crc\" DevicePath \"\""
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.728716 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7cdffa6-8fc4-44df-b52a-20f44c069d4d-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.728727 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7cdffa6-8fc4-44df-b52a-20f44c069d4d-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.743562 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8625b618-e756-46ad-a646-c94e824a1e83-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "8625b618-e756-46ad-a646-c94e824a1e83" (UID: "8625b618-e756-46ad-a646-c94e824a1e83"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.749313 5039 scope.go:117] "RemoveContainer" containerID="bc5d7b9d1f08719a82a5fbdd51034af3029cd5ab07ae23600f01ebf340063980"
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.750089 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4ctcm"]
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.758829 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8625b618-e756-46ad-a646-c94e824a1e83-kube-api-access-dcx8t" (OuterVolumeSpecName: "kube-api-access-dcx8t") pod "8625b618-e756-46ad-a646-c94e824a1e83" (UID: "8625b618-e756-46ad-a646-c94e824a1e83"). InnerVolumeSpecName "kube-api-access-dcx8t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.759879 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4ctcm"]
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.776571 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8625b618-e756-46ad-a646-c94e824a1e83-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8625b618-e756-46ad-a646-c94e824a1e83" (UID: "8625b618-e756-46ad-a646-c94e824a1e83"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.787660 5039 scope.go:117] "RemoveContainer" containerID="db989634c7eaab05ad07fabb57cce29c3483d8d86c9701e7004fa2cecd442672"
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.803833 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8625b618-e756-46ad-a646-c94e824a1e83-config-data" (OuterVolumeSpecName: "config-data") pod "8625b618-e756-46ad-a646-c94e824a1e83" (UID: "8625b618-e756-46ad-a646-c94e824a1e83"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.818685 5039 scope.go:117] "RemoveContainer" containerID="cc040e658a5e864f49b6a89bf502538beec554a4a79a4c2d235f2963492cdd7e"
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.833521 5039 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8625b618-e756-46ad-a646-c94e824a1e83-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.833569 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dcx8t\" (UniqueName: \"kubernetes.io/projected/8625b618-e756-46ad-a646-c94e824a1e83-kube-api-access-dcx8t\") on node \"crc\" DevicePath \"\""
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.833582 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8625b618-e756-46ad-a646-c94e824a1e83-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 13:42:03 crc kubenswrapper[5039]: I1124 13:42:03.833593 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8625b618-e756-46ad-a646-c94e824a1e83-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 13:42:04 crc kubenswrapper[5039]: I1124 13:42:04.059678 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-78dc996954-b5s9v"]
Nov 24 13:42:04 crc kubenswrapper[5039]: I1124 13:42:04.076136 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-78dc996954-b5s9v"]
Nov 24 13:42:04 crc kubenswrapper[5039]: I1124 13:42:04.322264 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8625b618-e756-46ad-a646-c94e824a1e83" path="/var/lib/kubelet/pods/8625b618-e756-46ad-a646-c94e824a1e83/volumes"
Nov 24 13:42:04 crc kubenswrapper[5039]: I1124 13:42:04.323454 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7cdffa6-8fc4-44df-b52a-20f44c069d4d" path="/var/lib/kubelet/pods/c7cdffa6-8fc4-44df-b52a-20f44c069d4d/volumes"
Nov 24 13:42:04 crc kubenswrapper[5039]: I1124 13:42:04.732667 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0de84706-a2dc-4680-83ba-324f8b41e3b0","Type":"ContainerStarted","Data":"3aa05fa45c905200e3588acaea885943297a7d77f91ad3cfb3278df4805a66e0"}
Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.763587 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-svzch"]
Nov 24 13:42:06 crc kubenswrapper[5039]: E1124 13:42:06.764307 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7cdffa6-8fc4-44df-b52a-20f44c069d4d" containerName="extract-utilities"
Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.764319 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7cdffa6-8fc4-44df-b52a-20f44c069d4d" containerName="extract-utilities"
Nov 24 13:42:06 crc kubenswrapper[5039]: E1124 13:42:06.764338 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7cdffa6-8fc4-44df-b52a-20f44c069d4d" containerName="extract-content"
Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.764344 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7cdffa6-8fc4-44df-b52a-20f44c069d4d" containerName="extract-content"
Nov 24 13:42:06 crc kubenswrapper[5039]: E1124 13:42:06.764354 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8625b618-e756-46ad-a646-c94e824a1e83" containerName="heat-engine"
podUID="8625b618-e756-46ad-a646-c94e824a1e83" containerName="heat-engine" Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.764360 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8625b618-e756-46ad-a646-c94e824a1e83" containerName="heat-engine" Nov 24 13:42:06 crc kubenswrapper[5039]: E1124 13:42:06.764369 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7cdffa6-8fc4-44df-b52a-20f44c069d4d" containerName="registry-server" Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.764374 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7cdffa6-8fc4-44df-b52a-20f44c069d4d" containerName="registry-server" Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.764590 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7cdffa6-8fc4-44df-b52a-20f44c069d4d" containerName="registry-server" Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.764599 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="8625b618-e756-46ad-a646-c94e824a1e83" containerName="heat-engine" Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.766511 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-svzch" Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.775259 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0de84706-a2dc-4680-83ba-324f8b41e3b0","Type":"ContainerStarted","Data":"1ab32fd8577eb452c7fdf1a261b11cb6733ec93b213d557d50512aab1c04a245"} Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.775424 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0de84706-a2dc-4680-83ba-324f8b41e3b0" containerName="ceilometer-central-agent" containerID="cri-o://608c639a441065c5f92ad5328f1c52c10b4669e125f9523e6c9d96ccd9a4ff99" gracePeriod=30 Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.775692 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0de84706-a2dc-4680-83ba-324f8b41e3b0" containerName="proxy-httpd" containerID="cri-o://1ab32fd8577eb452c7fdf1a261b11cb6733ec93b213d557d50512aab1c04a245" gracePeriod=30 Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.775700 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.775762 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0de84706-a2dc-4680-83ba-324f8b41e3b0" containerName="sg-core" containerID="cri-o://3aa05fa45c905200e3588acaea885943297a7d77f91ad3cfb3278df4805a66e0" gracePeriod=30 Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.775781 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0de84706-a2dc-4680-83ba-324f8b41e3b0" containerName="ceilometer-notification-agent" containerID="cri-o://e15358e1677425bf1663a74aef6bd160b643b24c545b67b96a396a0234623c2f" gracePeriod=30 Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.776152 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-svzch"] Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.803496 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxvkp\" (UniqueName: 
\"kubernetes.io/projected/5e1f5668-c1c5-4a87-aec0-32a153351cf1-kube-api-access-pxvkp\") pod \"community-operators-svzch\" (UID: \"5e1f5668-c1c5-4a87-aec0-32a153351cf1\") " pod="openshift-marketplace/community-operators-svzch" Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.803590 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e1f5668-c1c5-4a87-aec0-32a153351cf1-catalog-content\") pod \"community-operators-svzch\" (UID: \"5e1f5668-c1c5-4a87-aec0-32a153351cf1\") " pod="openshift-marketplace/community-operators-svzch" Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.803744 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e1f5668-c1c5-4a87-aec0-32a153351cf1-utilities\") pod \"community-operators-svzch\" (UID: \"5e1f5668-c1c5-4a87-aec0-32a153351cf1\") " pod="openshift-marketplace/community-operators-svzch" Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.832934 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.946926081 podStartE2EDuration="13.832913754s" podCreationTimestamp="2025-11-24 13:41:53 +0000 UTC" firstStartedPulling="2025-11-24 13:41:54.663220451 +0000 UTC m=+1427.102344951" lastFinishedPulling="2025-11-24 13:42:05.549208134 +0000 UTC m=+1437.988332624" observedRunningTime="2025-11-24 13:42:06.819030062 +0000 UTC m=+1439.258154562" watchObservedRunningTime="2025-11-24 13:42:06.832913754 +0000 UTC m=+1439.272038254" Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.906089 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e1f5668-c1c5-4a87-aec0-32a153351cf1-utilities\") pod \"community-operators-svzch\" (UID: \"5e1f5668-c1c5-4a87-aec0-32a153351cf1\") " pod="openshift-marketplace/community-operators-svzch" Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.906268 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxvkp\" (UniqueName: \"kubernetes.io/projected/5e1f5668-c1c5-4a87-aec0-32a153351cf1-kube-api-access-pxvkp\") pod \"community-operators-svzch\" (UID: \"5e1f5668-c1c5-4a87-aec0-32a153351cf1\") " pod="openshift-marketplace/community-operators-svzch" Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.906327 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e1f5668-c1c5-4a87-aec0-32a153351cf1-catalog-content\") pod \"community-operators-svzch\" (UID: \"5e1f5668-c1c5-4a87-aec0-32a153351cf1\") " pod="openshift-marketplace/community-operators-svzch" Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.906935 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e1f5668-c1c5-4a87-aec0-32a153351cf1-utilities\") pod \"community-operators-svzch\" (UID: \"5e1f5668-c1c5-4a87-aec0-32a153351cf1\") " pod="openshift-marketplace/community-operators-svzch" Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.907084 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e1f5668-c1c5-4a87-aec0-32a153351cf1-catalog-content\") pod \"community-operators-svzch\" (UID: 
\"5e1f5668-c1c5-4a87-aec0-32a153351cf1\") " pod="openshift-marketplace/community-operators-svzch" Nov 24 13:42:06 crc kubenswrapper[5039]: I1124 13:42:06.940534 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxvkp\" (UniqueName: \"kubernetes.io/projected/5e1f5668-c1c5-4a87-aec0-32a153351cf1-kube-api-access-pxvkp\") pod \"community-operators-svzch\" (UID: \"5e1f5668-c1c5-4a87-aec0-32a153351cf1\") " pod="openshift-marketplace/community-operators-svzch" Nov 24 13:42:07 crc kubenswrapper[5039]: I1124 13:42:07.102092 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-svzch" Nov 24 13:42:07 crc kubenswrapper[5039]: I1124 13:42:07.703000 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-svzch"] Nov 24 13:42:07 crc kubenswrapper[5039]: W1124 13:42:07.737231 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5e1f5668_c1c5_4a87_aec0_32a153351cf1.slice/crio-90ba8204dc56216170aa4cf804edf1cbf6d746afa32700fbbfad7a7ae870cc47 WatchSource:0}: Error finding container 90ba8204dc56216170aa4cf804edf1cbf6d746afa32700fbbfad7a7ae870cc47: Status 404 returned error can't find the container with id 90ba8204dc56216170aa4cf804edf1cbf6d746afa32700fbbfad7a7ae870cc47 Nov 24 13:42:07 crc kubenswrapper[5039]: I1124 13:42:07.788055 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-svzch" event={"ID":"5e1f5668-c1c5-4a87-aec0-32a153351cf1","Type":"ContainerStarted","Data":"90ba8204dc56216170aa4cf804edf1cbf6d746afa32700fbbfad7a7ae870cc47"} Nov 24 13:42:07 crc kubenswrapper[5039]: I1124 13:42:07.791309 5039 generic.go:334] "Generic (PLEG): container finished" podID="0de84706-a2dc-4680-83ba-324f8b41e3b0" containerID="1ab32fd8577eb452c7fdf1a261b11cb6733ec93b213d557d50512aab1c04a245" exitCode=0 Nov 24 13:42:07 crc kubenswrapper[5039]: I1124 13:42:07.791344 5039 generic.go:334] "Generic (PLEG): container finished" podID="0de84706-a2dc-4680-83ba-324f8b41e3b0" containerID="3aa05fa45c905200e3588acaea885943297a7d77f91ad3cfb3278df4805a66e0" exitCode=2 Nov 24 13:42:07 crc kubenswrapper[5039]: I1124 13:42:07.791354 5039 generic.go:334] "Generic (PLEG): container finished" podID="0de84706-a2dc-4680-83ba-324f8b41e3b0" containerID="e15358e1677425bf1663a74aef6bd160b643b24c545b67b96a396a0234623c2f" exitCode=0 Nov 24 13:42:07 crc kubenswrapper[5039]: I1124 13:42:07.791362 5039 generic.go:334] "Generic (PLEG): container finished" podID="0de84706-a2dc-4680-83ba-324f8b41e3b0" containerID="608c639a441065c5f92ad5328f1c52c10b4669e125f9523e6c9d96ccd9a4ff99" exitCode=0 Nov 24 13:42:07 crc kubenswrapper[5039]: I1124 13:42:07.791385 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0de84706-a2dc-4680-83ba-324f8b41e3b0","Type":"ContainerDied","Data":"1ab32fd8577eb452c7fdf1a261b11cb6733ec93b213d557d50512aab1c04a245"} Nov 24 13:42:07 crc kubenswrapper[5039]: I1124 13:42:07.791414 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0de84706-a2dc-4680-83ba-324f8b41e3b0","Type":"ContainerDied","Data":"3aa05fa45c905200e3588acaea885943297a7d77f91ad3cfb3278df4805a66e0"} Nov 24 13:42:07 crc kubenswrapper[5039]: I1124 13:42:07.791426 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"0de84706-a2dc-4680-83ba-324f8b41e3b0","Type":"ContainerDied","Data":"e15358e1677425bf1663a74aef6bd160b643b24c545b67b96a396a0234623c2f"} Nov 24 13:42:07 crc kubenswrapper[5039]: I1124 13:42:07.791436 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0de84706-a2dc-4680-83ba-324f8b41e3b0","Type":"ContainerDied","Data":"608c639a441065c5f92ad5328f1c52c10b4669e125f9523e6c9d96ccd9a4ff99"} Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.128515 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.243978 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-sg-core-conf-yaml\") pod \"0de84706-a2dc-4680-83ba-324f8b41e3b0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.244113 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-combined-ca-bundle\") pod \"0de84706-a2dc-4680-83ba-324f8b41e3b0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.244163 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0de84706-a2dc-4680-83ba-324f8b41e3b0-run-httpd\") pod \"0de84706-a2dc-4680-83ba-324f8b41e3b0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.244190 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-config-data\") pod \"0de84706-a2dc-4680-83ba-324f8b41e3b0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.244290 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0de84706-a2dc-4680-83ba-324f8b41e3b0-log-httpd\") pod \"0de84706-a2dc-4680-83ba-324f8b41e3b0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.244313 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l5t2n\" (UniqueName: \"kubernetes.io/projected/0de84706-a2dc-4680-83ba-324f8b41e3b0-kube-api-access-l5t2n\") pod \"0de84706-a2dc-4680-83ba-324f8b41e3b0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.244352 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-scripts\") pod \"0de84706-a2dc-4680-83ba-324f8b41e3b0\" (UID: \"0de84706-a2dc-4680-83ba-324f8b41e3b0\") " Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.244565 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0de84706-a2dc-4680-83ba-324f8b41e3b0-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0de84706-a2dc-4680-83ba-324f8b41e3b0" (UID: "0de84706-a2dc-4680-83ba-324f8b41e3b0"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.244826 5039 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0de84706-a2dc-4680-83ba-324f8b41e3b0-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.244921 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0de84706-a2dc-4680-83ba-324f8b41e3b0-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0de84706-a2dc-4680-83ba-324f8b41e3b0" (UID: "0de84706-a2dc-4680-83ba-324f8b41e3b0"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.250335 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-scripts" (OuterVolumeSpecName: "scripts") pod "0de84706-a2dc-4680-83ba-324f8b41e3b0" (UID: "0de84706-a2dc-4680-83ba-324f8b41e3b0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.250366 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0de84706-a2dc-4680-83ba-324f8b41e3b0-kube-api-access-l5t2n" (OuterVolumeSpecName: "kube-api-access-l5t2n") pod "0de84706-a2dc-4680-83ba-324f8b41e3b0" (UID: "0de84706-a2dc-4680-83ba-324f8b41e3b0"). InnerVolumeSpecName "kube-api-access-l5t2n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.280197 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0de84706-a2dc-4680-83ba-324f8b41e3b0" (UID: "0de84706-a2dc-4680-83ba-324f8b41e3b0"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.344554 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0de84706-a2dc-4680-83ba-324f8b41e3b0" (UID: "0de84706-a2dc-4680-83ba-324f8b41e3b0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.346369 5039 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.346392 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.346402 5039 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0de84706-a2dc-4680-83ba-324f8b41e3b0-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.346412 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l5t2n\" (UniqueName: \"kubernetes.io/projected/0de84706-a2dc-4680-83ba-324f8b41e3b0-kube-api-access-l5t2n\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.346422 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.372202 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-config-data" (OuterVolumeSpecName: "config-data") pod "0de84706-a2dc-4680-83ba-324f8b41e3b0" (UID: "0de84706-a2dc-4680-83ba-324f8b41e3b0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.448496 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0de84706-a2dc-4680-83ba-324f8b41e3b0-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.802869 5039 generic.go:334] "Generic (PLEG): container finished" podID="5e1f5668-c1c5-4a87-aec0-32a153351cf1" containerID="42c5c8393e93b491942c99da6b86f72f59b3b63c791e6234b59dea7f243a989d" exitCode=0 Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.802922 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-svzch" event={"ID":"5e1f5668-c1c5-4a87-aec0-32a153351cf1","Type":"ContainerDied","Data":"42c5c8393e93b491942c99da6b86f72f59b3b63c791e6234b59dea7f243a989d"} Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.806468 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0de84706-a2dc-4680-83ba-324f8b41e3b0","Type":"ContainerDied","Data":"283efecfb93cb8d465cab3a40805d1c1c324d939e1ae4f88af2ac476cdfe9c0a"} Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.806545 5039 scope.go:117] "RemoveContainer" containerID="1ab32fd8577eb452c7fdf1a261b11cb6733ec93b213d557d50512aab1c04a245" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.806587 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.837433 5039 scope.go:117] "RemoveContainer" containerID="3aa05fa45c905200e3588acaea885943297a7d77f91ad3cfb3278df4805a66e0" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.851347 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.862221 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.872070 5039 scope.go:117] "RemoveContainer" containerID="e15358e1677425bf1663a74aef6bd160b643b24c545b67b96a396a0234623c2f" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.894190 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:42:08 crc kubenswrapper[5039]: E1124 13:42:08.894856 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0de84706-a2dc-4680-83ba-324f8b41e3b0" containerName="sg-core" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.894875 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0de84706-a2dc-4680-83ba-324f8b41e3b0" containerName="sg-core" Nov 24 13:42:08 crc kubenswrapper[5039]: E1124 13:42:08.894914 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0de84706-a2dc-4680-83ba-324f8b41e3b0" containerName="proxy-httpd" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.894922 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0de84706-a2dc-4680-83ba-324f8b41e3b0" containerName="proxy-httpd" Nov 24 13:42:08 crc kubenswrapper[5039]: E1124 13:42:08.894937 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0de84706-a2dc-4680-83ba-324f8b41e3b0" containerName="ceilometer-notification-agent" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.894945 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0de84706-a2dc-4680-83ba-324f8b41e3b0" containerName="ceilometer-notification-agent" Nov 24 13:42:08 crc kubenswrapper[5039]: E1124 13:42:08.894961 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0de84706-a2dc-4680-83ba-324f8b41e3b0" containerName="ceilometer-central-agent" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.894968 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0de84706-a2dc-4680-83ba-324f8b41e3b0" containerName="ceilometer-central-agent" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.895203 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0de84706-a2dc-4680-83ba-324f8b41e3b0" containerName="sg-core" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.895224 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0de84706-a2dc-4680-83ba-324f8b41e3b0" containerName="ceilometer-notification-agent" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.895243 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0de84706-a2dc-4680-83ba-324f8b41e3b0" containerName="proxy-httpd" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.895253 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0de84706-a2dc-4680-83ba-324f8b41e3b0" containerName="ceilometer-central-agent" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.897467 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.902408 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.902629 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.922215 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.957719 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hz2xr\" (UniqueName: \"kubernetes.io/projected/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-kube-api-access-hz2xr\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.957796 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-run-httpd\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.957819 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-config-data\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.957846 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-scripts\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.957865 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-log-httpd\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.957922 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.957972 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:08 crc kubenswrapper[5039]: I1124 13:42:08.959171 5039 scope.go:117] "RemoveContainer" containerID="608c639a441065c5f92ad5328f1c52c10b4669e125f9523e6c9d96ccd9a4ff99" Nov 24 13:42:09 crc kubenswrapper[5039]: I1124 13:42:09.060022 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:09 crc kubenswrapper[5039]: I1124 13:42:09.060164 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hz2xr\" (UniqueName: \"kubernetes.io/projected/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-kube-api-access-hz2xr\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:09 crc kubenswrapper[5039]: I1124 13:42:09.060214 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-run-httpd\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:09 crc kubenswrapper[5039]: I1124 13:42:09.060237 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-config-data\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:09 crc kubenswrapper[5039]: I1124 13:42:09.060272 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-scripts\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:09 crc kubenswrapper[5039]: I1124 13:42:09.060293 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-log-httpd\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:09 crc kubenswrapper[5039]: I1124 13:42:09.060345 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:09 crc kubenswrapper[5039]: I1124 13:42:09.062082 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-log-httpd\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:09 crc kubenswrapper[5039]: I1124 13:42:09.062368 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-run-httpd\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:09 crc kubenswrapper[5039]: I1124 13:42:09.066156 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:09 crc kubenswrapper[5039]: I1124 13:42:09.067117 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:09 crc kubenswrapper[5039]: I1124 13:42:09.067841 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-scripts\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:09 crc kubenswrapper[5039]: I1124 13:42:09.068653 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-config-data\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:09 crc kubenswrapper[5039]: I1124 13:42:09.081170 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hz2xr\" (UniqueName: \"kubernetes.io/projected/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-kube-api-access-hz2xr\") pod \"ceilometer-0\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " pod="openstack/ceilometer-0" Nov 24 13:42:09 crc kubenswrapper[5039]: I1124 13:42:09.237134 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:42:09 crc kubenswrapper[5039]: I1124 13:42:09.238133 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:42:09 crc kubenswrapper[5039]: I1124 13:42:09.717857 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:42:09 crc kubenswrapper[5039]: I1124 13:42:09.819859 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5","Type":"ContainerStarted","Data":"f92c1355b1c41a1f41ee21a4dc65d6862a6d059c357f91f7fdf1d6e8754fc5fd"} Nov 24 13:42:10 crc kubenswrapper[5039]: I1124 13:42:10.322978 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0de84706-a2dc-4680-83ba-324f8b41e3b0" path="/var/lib/kubelet/pods/0de84706-a2dc-4680-83ba-324f8b41e3b0/volumes" Nov 24 13:42:10 crc kubenswrapper[5039]: I1124 13:42:10.836966 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5","Type":"ContainerStarted","Data":"308ec4c11b53fcca3882e826dbd6898c70245d7004086a15bcafe4374f00afe0"} Nov 24 13:42:11 crc kubenswrapper[5039]: I1124 13:42:11.552373 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-t8k26" Nov 24 13:42:11 crc kubenswrapper[5039]: I1124 13:42:11.552870 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-t8k26" Nov 24 13:42:11 crc kubenswrapper[5039]: I1124 13:42:11.852969 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5","Type":"ContainerStarted","Data":"ce420ade6af48d7f1e080df03707b887f4b9f72c0e1a797127d1a7a8f489346d"} Nov 24 13:42:12 crc kubenswrapper[5039]: I1124 13:42:12.612680 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-t8k26" podUID="b9a81aab-1978-4938-84f9-3a5511942ecb" containerName="registry-server" probeResult="failure" output=< Nov 24 13:42:12 crc kubenswrapper[5039]: timeout: failed to 
connect service ":50051" within 1s Nov 24 13:42:12 crc kubenswrapper[5039]: > Nov 24 13:42:13 crc kubenswrapper[5039]: I1124 13:42:13.874137 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5","Type":"ContainerStarted","Data":"967550871ffeb2315132060114a0037e1883c9dbf18e91ffa15c90d640297b60"} Nov 24 13:42:13 crc kubenswrapper[5039]: I1124 13:42:13.876250 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-svzch" event={"ID":"5e1f5668-c1c5-4a87-aec0-32a153351cf1","Type":"ContainerStarted","Data":"01c4d23b218dcada6adaeab36c7af12834f9b679fdfb074590db7721323c4ff7"} Nov 24 13:42:15 crc kubenswrapper[5039]: I1124 13:42:15.907065 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5","Type":"ContainerStarted","Data":"241f0540d59f914a52085675c2022bd187ca3643ec7a6267c2662e5b6d75f396"} Nov 24 13:42:15 crc kubenswrapper[5039]: I1124 13:42:15.907183 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" containerName="ceilometer-central-agent" containerID="cri-o://308ec4c11b53fcca3882e826dbd6898c70245d7004086a15bcafe4374f00afe0" gracePeriod=30 Nov 24 13:42:15 crc kubenswrapper[5039]: I1124 13:42:15.907402 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" containerName="proxy-httpd" containerID="cri-o://241f0540d59f914a52085675c2022bd187ca3643ec7a6267c2662e5b6d75f396" gracePeriod=30 Nov 24 13:42:15 crc kubenswrapper[5039]: I1124 13:42:15.907425 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 13:42:15 crc kubenswrapper[5039]: I1124 13:42:15.907465 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" containerName="sg-core" containerID="cri-o://967550871ffeb2315132060114a0037e1883c9dbf18e91ffa15c90d640297b60" gracePeriod=30 Nov 24 13:42:15 crc kubenswrapper[5039]: I1124 13:42:15.907535 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" containerName="ceilometer-notification-agent" containerID="cri-o://ce420ade6af48d7f1e080df03707b887f4b9f72c0e1a797127d1a7a8f489346d" gracePeriod=30 Nov 24 13:42:15 crc kubenswrapper[5039]: I1124 13:42:15.917458 5039 generic.go:334] "Generic (PLEG): container finished" podID="5e1f5668-c1c5-4a87-aec0-32a153351cf1" containerID="01c4d23b218dcada6adaeab36c7af12834f9b679fdfb074590db7721323c4ff7" exitCode=0 Nov 24 13:42:15 crc kubenswrapper[5039]: I1124 13:42:15.917530 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-svzch" event={"ID":"5e1f5668-c1c5-4a87-aec0-32a153351cf1","Type":"ContainerDied","Data":"01c4d23b218dcada6adaeab36c7af12834f9b679fdfb074590db7721323c4ff7"} Nov 24 13:42:15 crc kubenswrapper[5039]: I1124 13:42:15.940841 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.967248223 podStartE2EDuration="7.940824649s" podCreationTimestamp="2025-11-24 13:42:08 +0000 UTC" firstStartedPulling="2025-11-24 13:42:09.730156761 +0000 UTC m=+1442.169281261" lastFinishedPulling="2025-11-24 13:42:14.703733197 
+0000 UTC m=+1447.142857687" observedRunningTime="2025-11-24 13:42:15.933466498 +0000 UTC m=+1448.372590998" watchObservedRunningTime="2025-11-24 13:42:15.940824649 +0000 UTC m=+1448.379949149" Nov 24 13:42:16 crc kubenswrapper[5039]: I1124 13:42:16.930380 5039 generic.go:334] "Generic (PLEG): container finished" podID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" containerID="241f0540d59f914a52085675c2022bd187ca3643ec7a6267c2662e5b6d75f396" exitCode=0 Nov 24 13:42:16 crc kubenswrapper[5039]: I1124 13:42:16.930747 5039 generic.go:334] "Generic (PLEG): container finished" podID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" containerID="967550871ffeb2315132060114a0037e1883c9dbf18e91ffa15c90d640297b60" exitCode=2 Nov 24 13:42:16 crc kubenswrapper[5039]: I1124 13:42:16.930760 5039 generic.go:334] "Generic (PLEG): container finished" podID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" containerID="ce420ade6af48d7f1e080df03707b887f4b9f72c0e1a797127d1a7a8f489346d" exitCode=0 Nov 24 13:42:16 crc kubenswrapper[5039]: I1124 13:42:16.930444 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5","Type":"ContainerDied","Data":"241f0540d59f914a52085675c2022bd187ca3643ec7a6267c2662e5b6d75f396"} Nov 24 13:42:16 crc kubenswrapper[5039]: I1124 13:42:16.930844 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5","Type":"ContainerDied","Data":"967550871ffeb2315132060114a0037e1883c9dbf18e91ffa15c90d640297b60"} Nov 24 13:42:16 crc kubenswrapper[5039]: I1124 13:42:16.930858 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5","Type":"ContainerDied","Data":"ce420ade6af48d7f1e080df03707b887f4b9f72c0e1a797127d1a7a8f489346d"} Nov 24 13:42:16 crc kubenswrapper[5039]: I1124 13:42:16.933145 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-svzch" event={"ID":"5e1f5668-c1c5-4a87-aec0-32a153351cf1","Type":"ContainerStarted","Data":"11dc4558c1f27246986908a78d83161b7d4694ef02946fa76ecdc69c55d119d7"} Nov 24 13:42:16 crc kubenswrapper[5039]: I1124 13:42:16.966995 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-svzch" podStartSLOduration=3.289435752 podStartE2EDuration="10.966974093s" podCreationTimestamp="2025-11-24 13:42:06 +0000 UTC" firstStartedPulling="2025-11-24 13:42:08.804936424 +0000 UTC m=+1441.244060924" lastFinishedPulling="2025-11-24 13:42:16.482474765 +0000 UTC m=+1448.921599265" observedRunningTime="2025-11-24 13:42:16.958627577 +0000 UTC m=+1449.397752077" watchObservedRunningTime="2025-11-24 13:42:16.966974093 +0000 UTC m=+1449.406098593" Nov 24 13:42:17 crc kubenswrapper[5039]: I1124 13:42:17.102217 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-svzch" Nov 24 13:42:17 crc kubenswrapper[5039]: I1124 13:42:17.102288 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-svzch" Nov 24 13:42:18 crc kubenswrapper[5039]: I1124 13:42:18.149305 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-svzch" podUID="5e1f5668-c1c5-4a87-aec0-32a153351cf1" containerName="registry-server" probeResult="failure" output=< Nov 24 13:42:18 crc kubenswrapper[5039]: timeout: failed to connect service ":50051" 
within 1s Nov 24 13:42:18 crc kubenswrapper[5039]: > Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.101414 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.102029 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.598459 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.628711 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-config-data\") pod \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.628767 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-combined-ca-bundle\") pod \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.628828 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-scripts\") pod \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.628910 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-sg-core-conf-yaml\") pod \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.628930 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hz2xr\" (UniqueName: \"kubernetes.io/projected/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-kube-api-access-hz2xr\") pod \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.628959 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-run-httpd\") pod \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.628994 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-log-httpd\") pod \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\" (UID: \"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5\") " Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.630037 5039 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/empty-dir/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" (UID: "b68c50f7-1af8-48f2-85be-dea8e6ffe1e5"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.630707 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" (UID: "b68c50f7-1af8-48f2-85be-dea8e6ffe1e5"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.640784 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-kube-api-access-hz2xr" (OuterVolumeSpecName: "kube-api-access-hz2xr") pod "b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" (UID: "b68c50f7-1af8-48f2-85be-dea8e6ffe1e5"). InnerVolumeSpecName "kube-api-access-hz2xr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.641953 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-scripts" (OuterVolumeSpecName: "scripts") pod "b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" (UID: "b68c50f7-1af8-48f2-85be-dea8e6ffe1e5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.695395 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" (UID: "b68c50f7-1af8-48f2-85be-dea8e6ffe1e5"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.736524 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.737856 5039 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.737964 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hz2xr\" (UniqueName: \"kubernetes.io/projected/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-kube-api-access-hz2xr\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.738024 5039 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.738099 5039 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.761788 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" (UID: "b68c50f7-1af8-48f2-85be-dea8e6ffe1e5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.791654 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-config-data" (OuterVolumeSpecName: "config-data") pod "b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" (UID: "b68c50f7-1af8-48f2-85be-dea8e6ffe1e5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.840413 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.840443 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.973670 5039 generic.go:334] "Generic (PLEG): container finished" podID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" containerID="308ec4c11b53fcca3882e826dbd6898c70245d7004086a15bcafe4374f00afe0" exitCode=0 Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.973767 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5","Type":"ContainerDied","Data":"308ec4c11b53fcca3882e826dbd6898c70245d7004086a15bcafe4374f00afe0"} Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.973785 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.973821 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b68c50f7-1af8-48f2-85be-dea8e6ffe1e5","Type":"ContainerDied","Data":"f92c1355b1c41a1f41ee21a4dc65d6862a6d059c357f91f7fdf1d6e8754fc5fd"} Nov 24 13:42:20 crc kubenswrapper[5039]: I1124 13:42:20.973846 5039 scope.go:117] "RemoveContainer" containerID="241f0540d59f914a52085675c2022bd187ca3643ec7a6267c2662e5b6d75f396" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.017283 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.031170 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.041524 5039 scope.go:117] "RemoveContainer" containerID="967550871ffeb2315132060114a0037e1883c9dbf18e91ffa15c90d640297b60" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.044839 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:42:21 crc kubenswrapper[5039]: E1124 13:42:21.045272 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" containerName="ceilometer-notification-agent" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.045292 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" containerName="ceilometer-notification-agent" Nov 24 13:42:21 crc kubenswrapper[5039]: E1124 13:42:21.045304 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" containerName="sg-core" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.045310 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" containerName="sg-core" Nov 24 13:42:21 crc kubenswrapper[5039]: E1124 13:42:21.045326 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" containerName="ceilometer-central-agent" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.045336 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" containerName="ceilometer-central-agent" Nov 24 13:42:21 crc kubenswrapper[5039]: E1124 13:42:21.045363 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" containerName="proxy-httpd" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.045370 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" containerName="proxy-httpd" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.045614 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" containerName="ceilometer-notification-agent" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.045654 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" containerName="sg-core" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.045675 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" containerName="ceilometer-central-agent" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.045686 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" 
containerName="proxy-httpd" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.051860 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.059211 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.059428 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.073802 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.110761 5039 scope.go:117] "RemoveContainer" containerID="ce420ade6af48d7f1e080df03707b887f4b9f72c0e1a797127d1a7a8f489346d" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.137451 5039 scope.go:117] "RemoveContainer" containerID="308ec4c11b53fcca3882e826dbd6898c70245d7004086a15bcafe4374f00afe0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.151007 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5c3dde36-1fba-4d3f-812f-20c2118aecaa-log-httpd\") pod \"ceilometer-0\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.151496 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-scripts\") pod \"ceilometer-0\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.151737 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fvl9\" (UniqueName: \"kubernetes.io/projected/5c3dde36-1fba-4d3f-812f-20c2118aecaa-kube-api-access-2fvl9\") pod \"ceilometer-0\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.151846 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.152175 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.152225 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-config-data\") pod \"ceilometer-0\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.152403 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5c3dde36-1fba-4d3f-812f-20c2118aecaa-run-httpd\") pod 
\"ceilometer-0\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.165524 5039 scope.go:117] "RemoveContainer" containerID="241f0540d59f914a52085675c2022bd187ca3643ec7a6267c2662e5b6d75f396" Nov 24 13:42:21 crc kubenswrapper[5039]: E1124 13:42:21.166027 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"241f0540d59f914a52085675c2022bd187ca3643ec7a6267c2662e5b6d75f396\": container with ID starting with 241f0540d59f914a52085675c2022bd187ca3643ec7a6267c2662e5b6d75f396 not found: ID does not exist" containerID="241f0540d59f914a52085675c2022bd187ca3643ec7a6267c2662e5b6d75f396" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.166080 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"241f0540d59f914a52085675c2022bd187ca3643ec7a6267c2662e5b6d75f396"} err="failed to get container status \"241f0540d59f914a52085675c2022bd187ca3643ec7a6267c2662e5b6d75f396\": rpc error: code = NotFound desc = could not find container \"241f0540d59f914a52085675c2022bd187ca3643ec7a6267c2662e5b6d75f396\": container with ID starting with 241f0540d59f914a52085675c2022bd187ca3643ec7a6267c2662e5b6d75f396 not found: ID does not exist" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.166112 5039 scope.go:117] "RemoveContainer" containerID="967550871ffeb2315132060114a0037e1883c9dbf18e91ffa15c90d640297b60" Nov 24 13:42:21 crc kubenswrapper[5039]: E1124 13:42:21.166431 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"967550871ffeb2315132060114a0037e1883c9dbf18e91ffa15c90d640297b60\": container with ID starting with 967550871ffeb2315132060114a0037e1883c9dbf18e91ffa15c90d640297b60 not found: ID does not exist" containerID="967550871ffeb2315132060114a0037e1883c9dbf18e91ffa15c90d640297b60" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.166469 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"967550871ffeb2315132060114a0037e1883c9dbf18e91ffa15c90d640297b60"} err="failed to get container status \"967550871ffeb2315132060114a0037e1883c9dbf18e91ffa15c90d640297b60\": rpc error: code = NotFound desc = could not find container \"967550871ffeb2315132060114a0037e1883c9dbf18e91ffa15c90d640297b60\": container with ID starting with 967550871ffeb2315132060114a0037e1883c9dbf18e91ffa15c90d640297b60 not found: ID does not exist" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.166489 5039 scope.go:117] "RemoveContainer" containerID="ce420ade6af48d7f1e080df03707b887f4b9f72c0e1a797127d1a7a8f489346d" Nov 24 13:42:21 crc kubenswrapper[5039]: E1124 13:42:21.166710 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce420ade6af48d7f1e080df03707b887f4b9f72c0e1a797127d1a7a8f489346d\": container with ID starting with ce420ade6af48d7f1e080df03707b887f4b9f72c0e1a797127d1a7a8f489346d not found: ID does not exist" containerID="ce420ade6af48d7f1e080df03707b887f4b9f72c0e1a797127d1a7a8f489346d" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.166735 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce420ade6af48d7f1e080df03707b887f4b9f72c0e1a797127d1a7a8f489346d"} err="failed to get container status \"ce420ade6af48d7f1e080df03707b887f4b9f72c0e1a797127d1a7a8f489346d\": rpc 
error: code = NotFound desc = could not find container \"ce420ade6af48d7f1e080df03707b887f4b9f72c0e1a797127d1a7a8f489346d\": container with ID starting with ce420ade6af48d7f1e080df03707b887f4b9f72c0e1a797127d1a7a8f489346d not found: ID does not exist" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.166751 5039 scope.go:117] "RemoveContainer" containerID="308ec4c11b53fcca3882e826dbd6898c70245d7004086a15bcafe4374f00afe0" Nov 24 13:42:21 crc kubenswrapper[5039]: E1124 13:42:21.167012 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"308ec4c11b53fcca3882e826dbd6898c70245d7004086a15bcafe4374f00afe0\": container with ID starting with 308ec4c11b53fcca3882e826dbd6898c70245d7004086a15bcafe4374f00afe0 not found: ID does not exist" containerID="308ec4c11b53fcca3882e826dbd6898c70245d7004086a15bcafe4374f00afe0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.167033 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"308ec4c11b53fcca3882e826dbd6898c70245d7004086a15bcafe4374f00afe0"} err="failed to get container status \"308ec4c11b53fcca3882e826dbd6898c70245d7004086a15bcafe4374f00afe0\": rpc error: code = NotFound desc = could not find container \"308ec4c11b53fcca3882e826dbd6898c70245d7004086a15bcafe4374f00afe0\": container with ID starting with 308ec4c11b53fcca3882e826dbd6898c70245d7004086a15bcafe4374f00afe0 not found: ID does not exist" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.255094 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-scripts\") pod \"ceilometer-0\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.255220 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fvl9\" (UniqueName: \"kubernetes.io/projected/5c3dde36-1fba-4d3f-812f-20c2118aecaa-kube-api-access-2fvl9\") pod \"ceilometer-0\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.255252 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.255562 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.256129 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-config-data\") pod \"ceilometer-0\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.256198 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5c3dde36-1fba-4d3f-812f-20c2118aecaa-run-httpd\") pod \"ceilometer-0\" (UID: 
\"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.256231 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5c3dde36-1fba-4d3f-812f-20c2118aecaa-log-httpd\") pod \"ceilometer-0\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.256708 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5c3dde36-1fba-4d3f-812f-20c2118aecaa-log-httpd\") pod \"ceilometer-0\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.257395 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5c3dde36-1fba-4d3f-812f-20c2118aecaa-run-httpd\") pod \"ceilometer-0\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.259704 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-scripts\") pod \"ceilometer-0\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.263093 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-config-data\") pod \"ceilometer-0\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.263265 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.269604 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.273254 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fvl9\" (UniqueName: \"kubernetes.io/projected/5c3dde36-1fba-4d3f-812f-20c2118aecaa-kube-api-access-2fvl9\") pod \"ceilometer-0\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") " pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.371449 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.857231 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.985209 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5c3dde36-1fba-4d3f-812f-20c2118aecaa","Type":"ContainerStarted","Data":"ac3ab405f52449a5ef47711971cab5c27f7f68ba329daf474446c7d97daa6d9b"} Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.986837 5039 generic.go:334] "Generic (PLEG): container finished" podID="5a720981-59f4-4a6c-bc6a-ea08f5aa101b" containerID="0bc29d7ec77e50a067302d1c7c1db679819a0c41054ca499b314238e31599ff7" exitCode=0 Nov 24 13:42:21 crc kubenswrapper[5039]: I1124 13:42:21.986873 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-qn652" event={"ID":"5a720981-59f4-4a6c-bc6a-ea08f5aa101b","Type":"ContainerDied","Data":"0bc29d7ec77e50a067302d1c7c1db679819a0c41054ca499b314238e31599ff7"} Nov 24 13:42:22 crc kubenswrapper[5039]: I1124 13:42:22.344619 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b68c50f7-1af8-48f2-85be-dea8e6ffe1e5" path="/var/lib/kubelet/pods/b68c50f7-1af8-48f2-85be-dea8e6ffe1e5/volumes" Nov 24 13:42:22 crc kubenswrapper[5039]: I1124 13:42:22.625451 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-t8k26" podUID="b9a81aab-1978-4938-84f9-3a5511942ecb" containerName="registry-server" probeResult="failure" output=< Nov 24 13:42:22 crc kubenswrapper[5039]: timeout: failed to connect service ":50051" within 1s Nov 24 13:42:22 crc kubenswrapper[5039]: > Nov 24 13:42:22 crc kubenswrapper[5039]: I1124 13:42:22.999020 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5c3dde36-1fba-4d3f-812f-20c2118aecaa","Type":"ContainerStarted","Data":"f0c59fc9f81fa0f7500b240f06a3a5b558f415016d7c1f34609ab5dfe30cbc87"} Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.443939 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-qn652" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.464359 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-wzr9w"] Nov 24 13:42:23 crc kubenswrapper[5039]: E1124 13:42:23.465064 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a720981-59f4-4a6c-bc6a-ea08f5aa101b" containerName="nova-cell0-conductor-db-sync" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.465090 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a720981-59f4-4a6c-bc6a-ea08f5aa101b" containerName="nova-cell0-conductor-db-sync" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.465269 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a720981-59f4-4a6c-bc6a-ea08f5aa101b" containerName="nova-cell0-conductor-db-sync" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.466085 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-wzr9w" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.471845 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-wzr9w"] Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.505962 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8s9qz\" (UniqueName: \"kubernetes.io/projected/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-kube-api-access-8s9qz\") pod \"5a720981-59f4-4a6c-bc6a-ea08f5aa101b\" (UID: \"5a720981-59f4-4a6c-bc6a-ea08f5aa101b\") " Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.506064 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-config-data\") pod \"5a720981-59f4-4a6c-bc6a-ea08f5aa101b\" (UID: \"5a720981-59f4-4a6c-bc6a-ea08f5aa101b\") " Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.506245 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-scripts\") pod \"5a720981-59f4-4a6c-bc6a-ea08f5aa101b\" (UID: \"5a720981-59f4-4a6c-bc6a-ea08f5aa101b\") " Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.506313 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-combined-ca-bundle\") pod \"5a720981-59f4-4a6c-bc6a-ea08f5aa101b\" (UID: \"5a720981-59f4-4a6c-bc6a-ea08f5aa101b\") " Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.511742 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-kube-api-access-8s9qz" (OuterVolumeSpecName: "kube-api-access-8s9qz") pod "5a720981-59f4-4a6c-bc6a-ea08f5aa101b" (UID: "5a720981-59f4-4a6c-bc6a-ea08f5aa101b"). InnerVolumeSpecName "kube-api-access-8s9qz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.521641 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-scripts" (OuterVolumeSpecName: "scripts") pod "5a720981-59f4-4a6c-bc6a-ea08f5aa101b" (UID: "5a720981-59f4-4a6c-bc6a-ea08f5aa101b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.582992 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5a720981-59f4-4a6c-bc6a-ea08f5aa101b" (UID: "5a720981-59f4-4a6c-bc6a-ea08f5aa101b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.585355 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-8ba9-account-create-qkpgl"] Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.587009 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-8ba9-account-create-qkpgl" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.590726 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.608919 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lb57x\" (UniqueName: \"kubernetes.io/projected/dc53aceb-f577-4036-88f5-76d8fe736cb0-kube-api-access-lb57x\") pod \"aodh-db-create-wzr9w\" (UID: \"dc53aceb-f577-4036-88f5-76d8fe736cb0\") " pod="openstack/aodh-db-create-wzr9w" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.609026 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc53aceb-f577-4036-88f5-76d8fe736cb0-operator-scripts\") pod \"aodh-db-create-wzr9w\" (UID: \"dc53aceb-f577-4036-88f5-76d8fe736cb0\") " pod="openstack/aodh-db-create-wzr9w" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.609147 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8s9qz\" (UniqueName: \"kubernetes.io/projected/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-kube-api-access-8s9qz\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.609158 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.609168 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.618604 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-8ba9-account-create-qkpgl"] Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.646119 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-config-data" (OuterVolumeSpecName: "config-data") pod "5a720981-59f4-4a6c-bc6a-ea08f5aa101b" (UID: "5a720981-59f4-4a6c-bc6a-ea08f5aa101b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.710782 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvspm\" (UniqueName: \"kubernetes.io/projected/725180e7-824d-4133-ba16-8be24fa96cc9-kube-api-access-vvspm\") pod \"aodh-8ba9-account-create-qkpgl\" (UID: \"725180e7-824d-4133-ba16-8be24fa96cc9\") " pod="openstack/aodh-8ba9-account-create-qkpgl" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.710875 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lb57x\" (UniqueName: \"kubernetes.io/projected/dc53aceb-f577-4036-88f5-76d8fe736cb0-kube-api-access-lb57x\") pod \"aodh-db-create-wzr9w\" (UID: \"dc53aceb-f577-4036-88f5-76d8fe736cb0\") " pod="openstack/aodh-db-create-wzr9w" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.710990 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/725180e7-824d-4133-ba16-8be24fa96cc9-operator-scripts\") pod \"aodh-8ba9-account-create-qkpgl\" (UID: \"725180e7-824d-4133-ba16-8be24fa96cc9\") " pod="openstack/aodh-8ba9-account-create-qkpgl" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.711027 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc53aceb-f577-4036-88f5-76d8fe736cb0-operator-scripts\") pod \"aodh-db-create-wzr9w\" (UID: \"dc53aceb-f577-4036-88f5-76d8fe736cb0\") " pod="openstack/aodh-db-create-wzr9w" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.711163 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a720981-59f4-4a6c-bc6a-ea08f5aa101b-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.712011 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc53aceb-f577-4036-88f5-76d8fe736cb0-operator-scripts\") pod \"aodh-db-create-wzr9w\" (UID: \"dc53aceb-f577-4036-88f5-76d8fe736cb0\") " pod="openstack/aodh-db-create-wzr9w" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.732322 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lb57x\" (UniqueName: \"kubernetes.io/projected/dc53aceb-f577-4036-88f5-76d8fe736cb0-kube-api-access-lb57x\") pod \"aodh-db-create-wzr9w\" (UID: \"dc53aceb-f577-4036-88f5-76d8fe736cb0\") " pod="openstack/aodh-db-create-wzr9w" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.790984 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-wzr9w" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.813267 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvspm\" (UniqueName: \"kubernetes.io/projected/725180e7-824d-4133-ba16-8be24fa96cc9-kube-api-access-vvspm\") pod \"aodh-8ba9-account-create-qkpgl\" (UID: \"725180e7-824d-4133-ba16-8be24fa96cc9\") " pod="openstack/aodh-8ba9-account-create-qkpgl" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.813536 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/725180e7-824d-4133-ba16-8be24fa96cc9-operator-scripts\") pod \"aodh-8ba9-account-create-qkpgl\" (UID: \"725180e7-824d-4133-ba16-8be24fa96cc9\") " pod="openstack/aodh-8ba9-account-create-qkpgl" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.814202 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/725180e7-824d-4133-ba16-8be24fa96cc9-operator-scripts\") pod \"aodh-8ba9-account-create-qkpgl\" (UID: \"725180e7-824d-4133-ba16-8be24fa96cc9\") " pod="openstack/aodh-8ba9-account-create-qkpgl" Nov 24 13:42:23 crc kubenswrapper[5039]: I1124 13:42:23.834315 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvspm\" (UniqueName: \"kubernetes.io/projected/725180e7-824d-4133-ba16-8be24fa96cc9-kube-api-access-vvspm\") pod \"aodh-8ba9-account-create-qkpgl\" (UID: \"725180e7-824d-4133-ba16-8be24fa96cc9\") " pod="openstack/aodh-8ba9-account-create-qkpgl" Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.015825 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-qn652" Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.016081 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-qn652" event={"ID":"5a720981-59f4-4a6c-bc6a-ea08f5aa101b","Type":"ContainerDied","Data":"1c6ee46f8f6b152253e81948b1655dc106dbd1997179ba9c9ae16f95b5889a6a"} Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.016126 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1c6ee46f8f6b152253e81948b1655dc106dbd1997179ba9c9ae16f95b5889a6a" Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.030498 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5c3dde36-1fba-4d3f-812f-20c2118aecaa","Type":"ContainerStarted","Data":"c2e6ba544a5a6ed1b9cffc2fb6bab004cf3517825a2d7d6738deb68cda50217f"} Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.104460 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-8ba9-account-create-qkpgl" Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.119455 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.122442 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.128555 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-9nq8q" Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.138724 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.148121 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.222275 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62ba09af-0d54-45af-8bed-9c8a1a3661f2-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"62ba09af-0d54-45af-8bed-9c8a1a3661f2\") " pod="openstack/nova-cell0-conductor-0" Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.222393 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mffl\" (UniqueName: \"kubernetes.io/projected/62ba09af-0d54-45af-8bed-9c8a1a3661f2-kube-api-access-5mffl\") pod \"nova-cell0-conductor-0\" (UID: \"62ba09af-0d54-45af-8bed-9c8a1a3661f2\") " pod="openstack/nova-cell0-conductor-0" Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.222655 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62ba09af-0d54-45af-8bed-9c8a1a3661f2-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"62ba09af-0d54-45af-8bed-9c8a1a3661f2\") " pod="openstack/nova-cell0-conductor-0" Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.301894 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-wzr9w"] Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.327832 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62ba09af-0d54-45af-8bed-9c8a1a3661f2-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"62ba09af-0d54-45af-8bed-9c8a1a3661f2\") " pod="openstack/nova-cell0-conductor-0" Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.327890 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mffl\" (UniqueName: \"kubernetes.io/projected/62ba09af-0d54-45af-8bed-9c8a1a3661f2-kube-api-access-5mffl\") pod \"nova-cell0-conductor-0\" (UID: \"62ba09af-0d54-45af-8bed-9c8a1a3661f2\") " pod="openstack/nova-cell0-conductor-0" Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.328002 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62ba09af-0d54-45af-8bed-9c8a1a3661f2-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"62ba09af-0d54-45af-8bed-9c8a1a3661f2\") " pod="openstack/nova-cell0-conductor-0" Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.338306 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62ba09af-0d54-45af-8bed-9c8a1a3661f2-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"62ba09af-0d54-45af-8bed-9c8a1a3661f2\") " pod="openstack/nova-cell0-conductor-0" Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.348727 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62ba09af-0d54-45af-8bed-9c8a1a3661f2-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"62ba09af-0d54-45af-8bed-9c8a1a3661f2\") " pod="openstack/nova-cell0-conductor-0" Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.348921 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mffl\" (UniqueName: \"kubernetes.io/projected/62ba09af-0d54-45af-8bed-9c8a1a3661f2-kube-api-access-5mffl\") pod \"nova-cell0-conductor-0\" (UID: \"62ba09af-0d54-45af-8bed-9c8a1a3661f2\") " pod="openstack/nova-cell0-conductor-0" Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.443153 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.646664 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-8ba9-account-create-qkpgl"] Nov 24 13:42:24 crc kubenswrapper[5039]: W1124 13:42:24.671205 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod725180e7_824d_4133_ba16_8be24fa96cc9.slice/crio-22197ce866187760a5495276703f7c43e61a1856448d0379159fe294ef5dd1be WatchSource:0}: Error finding container 22197ce866187760a5495276703f7c43e61a1856448d0379159fe294ef5dd1be: Status 404 returned error can't find the container with id 22197ce866187760a5495276703f7c43e61a1856448d0379159fe294ef5dd1be Nov 24 13:42:24 crc kubenswrapper[5039]: W1124 13:42:24.944719 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62ba09af_0d54_45af_8bed_9c8a1a3661f2.slice/crio-a386f1ef14107808f858618e8768e68bb96b771d9c87f2f05d453901cedc4ac0 WatchSource:0}: Error finding container a386f1ef14107808f858618e8768e68bb96b771d9c87f2f05d453901cedc4ac0: Status 404 returned error can't find the container with id a386f1ef14107808f858618e8768e68bb96b771d9c87f2f05d453901cedc4ac0 Nov 24 13:42:24 crc kubenswrapper[5039]: I1124 13:42:24.958127 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 24 13:42:25 crc kubenswrapper[5039]: I1124 13:42:25.050910 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5c3dde36-1fba-4d3f-812f-20c2118aecaa","Type":"ContainerStarted","Data":"1ab5882346438ed134b9131b22c7c567a8ae07ff56e2310bd8b6145e8b911fb4"} Nov 24 13:42:25 crc kubenswrapper[5039]: I1124 13:42:25.052674 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-8ba9-account-create-qkpgl" event={"ID":"725180e7-824d-4133-ba16-8be24fa96cc9","Type":"ContainerStarted","Data":"133af9a7fdccba325874a1ce4edf2046dd1cfb39304d042d2247df286632b160"} Nov 24 13:42:25 crc kubenswrapper[5039]: I1124 13:42:25.052711 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-8ba9-account-create-qkpgl" event={"ID":"725180e7-824d-4133-ba16-8be24fa96cc9","Type":"ContainerStarted","Data":"22197ce866187760a5495276703f7c43e61a1856448d0379159fe294ef5dd1be"} Nov 24 13:42:25 crc kubenswrapper[5039]: I1124 13:42:25.055936 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"62ba09af-0d54-45af-8bed-9c8a1a3661f2","Type":"ContainerStarted","Data":"a386f1ef14107808f858618e8768e68bb96b771d9c87f2f05d453901cedc4ac0"} Nov 24 13:42:25 crc kubenswrapper[5039]: I1124 13:42:25.058927 5039 generic.go:334] "Generic (PLEG): 
container finished" podID="dc53aceb-f577-4036-88f5-76d8fe736cb0" containerID="650a92f781e12917b226edab0e88c2c82ba420834c78e13cf0c3dae5a421285f" exitCode=0 Nov 24 13:42:25 crc kubenswrapper[5039]: I1124 13:42:25.058990 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-wzr9w" event={"ID":"dc53aceb-f577-4036-88f5-76d8fe736cb0","Type":"ContainerDied","Data":"650a92f781e12917b226edab0e88c2c82ba420834c78e13cf0c3dae5a421285f"} Nov 24 13:42:25 crc kubenswrapper[5039]: I1124 13:42:25.059029 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-wzr9w" event={"ID":"dc53aceb-f577-4036-88f5-76d8fe736cb0","Type":"ContainerStarted","Data":"1fc59a337d162d5a6b72a5ed860079c3a6f100156c2e125c5d746db857b99ffd"} Nov 24 13:42:25 crc kubenswrapper[5039]: I1124 13:42:25.073725 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-8ba9-account-create-qkpgl" podStartSLOduration=2.073708369 podStartE2EDuration="2.073708369s" podCreationTimestamp="2025-11-24 13:42:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:42:25.071168386 +0000 UTC m=+1457.510292886" watchObservedRunningTime="2025-11-24 13:42:25.073708369 +0000 UTC m=+1457.512832869" Nov 24 13:42:26 crc kubenswrapper[5039]: I1124 13:42:26.071157 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5c3dde36-1fba-4d3f-812f-20c2118aecaa","Type":"ContainerStarted","Data":"e658ae07e319c31fe236230f6a0c9d415d4023e9f542e782897b69e1791ea092"} Nov 24 13:42:26 crc kubenswrapper[5039]: I1124 13:42:26.071718 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 13:42:26 crc kubenswrapper[5039]: I1124 13:42:26.073757 5039 generic.go:334] "Generic (PLEG): container finished" podID="725180e7-824d-4133-ba16-8be24fa96cc9" containerID="133af9a7fdccba325874a1ce4edf2046dd1cfb39304d042d2247df286632b160" exitCode=0 Nov 24 13:42:26 crc kubenswrapper[5039]: I1124 13:42:26.073801 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-8ba9-account-create-qkpgl" event={"ID":"725180e7-824d-4133-ba16-8be24fa96cc9","Type":"ContainerDied","Data":"133af9a7fdccba325874a1ce4edf2046dd1cfb39304d042d2247df286632b160"} Nov 24 13:42:26 crc kubenswrapper[5039]: I1124 13:42:26.075529 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"62ba09af-0d54-45af-8bed-9c8a1a3661f2","Type":"ContainerStarted","Data":"d241b4bc8b2a6b006b838303bc60725bee537cbac18166135584ecc8c8d7dabe"} Nov 24 13:42:26 crc kubenswrapper[5039]: I1124 13:42:26.103921 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.542283235 podStartE2EDuration="5.103898811s" podCreationTimestamp="2025-11-24 13:42:21 +0000 UTC" firstStartedPulling="2025-11-24 13:42:21.86429637 +0000 UTC m=+1454.303420870" lastFinishedPulling="2025-11-24 13:42:25.425911946 +0000 UTC m=+1457.865036446" observedRunningTime="2025-11-24 13:42:26.092633364 +0000 UTC m=+1458.531757884" watchObservedRunningTime="2025-11-24 13:42:26.103898811 +0000 UTC m=+1458.543023311" Nov 24 13:42:26 crc kubenswrapper[5039]: I1124 13:42:26.138933 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.138911764 podStartE2EDuration="2.138911764s" 
podCreationTimestamp="2025-11-24 13:42:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:42:26.125475193 +0000 UTC m=+1458.564599703" watchObservedRunningTime="2025-11-24 13:42:26.138911764 +0000 UTC m=+1458.578036264" Nov 24 13:42:26 crc kubenswrapper[5039]: I1124 13:42:26.491416 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-wzr9w" Nov 24 13:42:26 crc kubenswrapper[5039]: I1124 13:42:26.578781 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc53aceb-f577-4036-88f5-76d8fe736cb0-operator-scripts\") pod \"dc53aceb-f577-4036-88f5-76d8fe736cb0\" (UID: \"dc53aceb-f577-4036-88f5-76d8fe736cb0\") " Nov 24 13:42:26 crc kubenswrapper[5039]: I1124 13:42:26.579488 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lb57x\" (UniqueName: \"kubernetes.io/projected/dc53aceb-f577-4036-88f5-76d8fe736cb0-kube-api-access-lb57x\") pod \"dc53aceb-f577-4036-88f5-76d8fe736cb0\" (UID: \"dc53aceb-f577-4036-88f5-76d8fe736cb0\") " Nov 24 13:42:26 crc kubenswrapper[5039]: I1124 13:42:26.579727 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc53aceb-f577-4036-88f5-76d8fe736cb0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "dc53aceb-f577-4036-88f5-76d8fe736cb0" (UID: "dc53aceb-f577-4036-88f5-76d8fe736cb0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:42:26 crc kubenswrapper[5039]: I1124 13:42:26.580677 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc53aceb-f577-4036-88f5-76d8fe736cb0-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:26 crc kubenswrapper[5039]: I1124 13:42:26.584686 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc53aceb-f577-4036-88f5-76d8fe736cb0-kube-api-access-lb57x" (OuterVolumeSpecName: "kube-api-access-lb57x") pod "dc53aceb-f577-4036-88f5-76d8fe736cb0" (UID: "dc53aceb-f577-4036-88f5-76d8fe736cb0"). InnerVolumeSpecName "kube-api-access-lb57x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:42:26 crc kubenswrapper[5039]: I1124 13:42:26.683117 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lb57x\" (UniqueName: \"kubernetes.io/projected/dc53aceb-f577-4036-88f5-76d8fe736cb0-kube-api-access-lb57x\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:27 crc kubenswrapper[5039]: I1124 13:42:27.089595 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-wzr9w" event={"ID":"dc53aceb-f577-4036-88f5-76d8fe736cb0","Type":"ContainerDied","Data":"1fc59a337d162d5a6b72a5ed860079c3a6f100156c2e125c5d746db857b99ffd"} Nov 24 13:42:27 crc kubenswrapper[5039]: I1124 13:42:27.089647 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fc59a337d162d5a6b72a5ed860079c3a6f100156c2e125c5d746db857b99ffd" Nov 24 13:42:27 crc kubenswrapper[5039]: I1124 13:42:27.089725 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-wzr9w" Nov 24 13:42:27 crc kubenswrapper[5039]: I1124 13:42:27.090023 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 24 13:42:27 crc kubenswrapper[5039]: I1124 13:42:27.182350 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-svzch" Nov 24 13:42:27 crc kubenswrapper[5039]: I1124 13:42:27.272077 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-svzch" Nov 24 13:42:27 crc kubenswrapper[5039]: I1124 13:42:27.366344 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-svzch"] Nov 24 13:42:27 crc kubenswrapper[5039]: I1124 13:42:27.455622 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qr7w8"] Nov 24 13:42:27 crc kubenswrapper[5039]: I1124 13:42:27.458562 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qr7w8" podUID="f3688350-42c7-4e8e-886d-e4d3c718221f" containerName="registry-server" containerID="cri-o://39fb1e4afc6a9a97324b83d503ef1f71cef1adc38d1446d129690b204ac32b45" gracePeriod=2 Nov 24 13:42:27 crc kubenswrapper[5039]: I1124 13:42:27.686119 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-8ba9-account-create-qkpgl" Nov 24 13:42:27 crc kubenswrapper[5039]: I1124 13:42:27.814531 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/725180e7-824d-4133-ba16-8be24fa96cc9-operator-scripts\") pod \"725180e7-824d-4133-ba16-8be24fa96cc9\" (UID: \"725180e7-824d-4133-ba16-8be24fa96cc9\") " Nov 24 13:42:27 crc kubenswrapper[5039]: I1124 13:42:27.814777 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vvspm\" (UniqueName: \"kubernetes.io/projected/725180e7-824d-4133-ba16-8be24fa96cc9-kube-api-access-vvspm\") pod \"725180e7-824d-4133-ba16-8be24fa96cc9\" (UID: \"725180e7-824d-4133-ba16-8be24fa96cc9\") " Nov 24 13:42:27 crc kubenswrapper[5039]: I1124 13:42:27.818319 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/725180e7-824d-4133-ba16-8be24fa96cc9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "725180e7-824d-4133-ba16-8be24fa96cc9" (UID: "725180e7-824d-4133-ba16-8be24fa96cc9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:42:27 crc kubenswrapper[5039]: I1124 13:42:27.851872 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/725180e7-824d-4133-ba16-8be24fa96cc9-kube-api-access-vvspm" (OuterVolumeSpecName: "kube-api-access-vvspm") pod "725180e7-824d-4133-ba16-8be24fa96cc9" (UID: "725180e7-824d-4133-ba16-8be24fa96cc9"). InnerVolumeSpecName "kube-api-access-vvspm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:42:27 crc kubenswrapper[5039]: I1124 13:42:27.916919 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vvspm\" (UniqueName: \"kubernetes.io/projected/725180e7-824d-4133-ba16-8be24fa96cc9-kube-api-access-vvspm\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:27 crc kubenswrapper[5039]: I1124 13:42:27.916949 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/725180e7-824d-4133-ba16-8be24fa96cc9-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:27 crc kubenswrapper[5039]: I1124 13:42:27.953101 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qr7w8" Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.020748 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gwkrq\" (UniqueName: \"kubernetes.io/projected/f3688350-42c7-4e8e-886d-e4d3c718221f-kube-api-access-gwkrq\") pod \"f3688350-42c7-4e8e-886d-e4d3c718221f\" (UID: \"f3688350-42c7-4e8e-886d-e4d3c718221f\") " Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.020869 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3688350-42c7-4e8e-886d-e4d3c718221f-catalog-content\") pod \"f3688350-42c7-4e8e-886d-e4d3c718221f\" (UID: \"f3688350-42c7-4e8e-886d-e4d3c718221f\") " Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.021018 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3688350-42c7-4e8e-886d-e4d3c718221f-utilities\") pod \"f3688350-42c7-4e8e-886d-e4d3c718221f\" (UID: \"f3688350-42c7-4e8e-886d-e4d3c718221f\") " Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.023651 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3688350-42c7-4e8e-886d-e4d3c718221f-utilities" (OuterVolumeSpecName: "utilities") pod "f3688350-42c7-4e8e-886d-e4d3c718221f" (UID: "f3688350-42c7-4e8e-886d-e4d3c718221f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.029764 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3688350-42c7-4e8e-886d-e4d3c718221f-kube-api-access-gwkrq" (OuterVolumeSpecName: "kube-api-access-gwkrq") pod "f3688350-42c7-4e8e-886d-e4d3c718221f" (UID: "f3688350-42c7-4e8e-886d-e4d3c718221f"). InnerVolumeSpecName "kube-api-access-gwkrq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.106252 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3688350-42c7-4e8e-886d-e4d3c718221f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f3688350-42c7-4e8e-886d-e4d3c718221f" (UID: "f3688350-42c7-4e8e-886d-e4d3c718221f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.127882 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3688350-42c7-4e8e-886d-e4d3c718221f-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.127913 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gwkrq\" (UniqueName: \"kubernetes.io/projected/f3688350-42c7-4e8e-886d-e4d3c718221f-kube-api-access-gwkrq\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.127925 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3688350-42c7-4e8e-886d-e4d3c718221f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.131034 5039 generic.go:334] "Generic (PLEG): container finished" podID="f3688350-42c7-4e8e-886d-e4d3c718221f" containerID="39fb1e4afc6a9a97324b83d503ef1f71cef1adc38d1446d129690b204ac32b45" exitCode=0 Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.131102 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qr7w8" event={"ID":"f3688350-42c7-4e8e-886d-e4d3c718221f","Type":"ContainerDied","Data":"39fb1e4afc6a9a97324b83d503ef1f71cef1adc38d1446d129690b204ac32b45"} Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.131131 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qr7w8" event={"ID":"f3688350-42c7-4e8e-886d-e4d3c718221f","Type":"ContainerDied","Data":"d129708db0749f5552f3d3b18fc192df85b12a251a425a0b9af606c295e9fc4a"} Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.131146 5039 scope.go:117] "RemoveContainer" containerID="39fb1e4afc6a9a97324b83d503ef1f71cef1adc38d1446d129690b204ac32b45" Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.131292 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qr7w8" Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.144268 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-8ba9-account-create-qkpgl" Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.145727 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-8ba9-account-create-qkpgl" event={"ID":"725180e7-824d-4133-ba16-8be24fa96cc9","Type":"ContainerDied","Data":"22197ce866187760a5495276703f7c43e61a1856448d0379159fe294ef5dd1be"} Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.145781 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="22197ce866187760a5495276703f7c43e61a1856448d0379159fe294ef5dd1be" Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.179943 5039 scope.go:117] "RemoveContainer" containerID="e95802098afe31499de8bd68d93106457377b3dcef6e1aa39012b850812bda26" Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.194668 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qr7w8"] Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.215646 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qr7w8"] Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.233978 5039 scope.go:117] "RemoveContainer" containerID="452390df03b3a575c0a473b41ee9ad4d3ea5952a126283be9bbd4e001c716ec6" Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.260956 5039 scope.go:117] "RemoveContainer" containerID="39fb1e4afc6a9a97324b83d503ef1f71cef1adc38d1446d129690b204ac32b45" Nov 24 13:42:28 crc kubenswrapper[5039]: E1124 13:42:28.261326 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39fb1e4afc6a9a97324b83d503ef1f71cef1adc38d1446d129690b204ac32b45\": container with ID starting with 39fb1e4afc6a9a97324b83d503ef1f71cef1adc38d1446d129690b204ac32b45 not found: ID does not exist" containerID="39fb1e4afc6a9a97324b83d503ef1f71cef1adc38d1446d129690b204ac32b45" Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.261365 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39fb1e4afc6a9a97324b83d503ef1f71cef1adc38d1446d129690b204ac32b45"} err="failed to get container status \"39fb1e4afc6a9a97324b83d503ef1f71cef1adc38d1446d129690b204ac32b45\": rpc error: code = NotFound desc = could not find container \"39fb1e4afc6a9a97324b83d503ef1f71cef1adc38d1446d129690b204ac32b45\": container with ID starting with 39fb1e4afc6a9a97324b83d503ef1f71cef1adc38d1446d129690b204ac32b45 not found: ID does not exist" Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.261394 5039 scope.go:117] "RemoveContainer" containerID="e95802098afe31499de8bd68d93106457377b3dcef6e1aa39012b850812bda26" Nov 24 13:42:28 crc kubenswrapper[5039]: E1124 13:42:28.261822 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e95802098afe31499de8bd68d93106457377b3dcef6e1aa39012b850812bda26\": container with ID starting with e95802098afe31499de8bd68d93106457377b3dcef6e1aa39012b850812bda26 not found: ID does not exist" containerID="e95802098afe31499de8bd68d93106457377b3dcef6e1aa39012b850812bda26" Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.261848 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e95802098afe31499de8bd68d93106457377b3dcef6e1aa39012b850812bda26"} err="failed to get container status \"e95802098afe31499de8bd68d93106457377b3dcef6e1aa39012b850812bda26\": rpc error: code = NotFound 
desc = could not find container \"e95802098afe31499de8bd68d93106457377b3dcef6e1aa39012b850812bda26\": container with ID starting with e95802098afe31499de8bd68d93106457377b3dcef6e1aa39012b850812bda26 not found: ID does not exist" Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.261860 5039 scope.go:117] "RemoveContainer" containerID="452390df03b3a575c0a473b41ee9ad4d3ea5952a126283be9bbd4e001c716ec6" Nov 24 13:42:28 crc kubenswrapper[5039]: E1124 13:42:28.264693 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"452390df03b3a575c0a473b41ee9ad4d3ea5952a126283be9bbd4e001c716ec6\": container with ID starting with 452390df03b3a575c0a473b41ee9ad4d3ea5952a126283be9bbd4e001c716ec6 not found: ID does not exist" containerID="452390df03b3a575c0a473b41ee9ad4d3ea5952a126283be9bbd4e001c716ec6" Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.264719 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"452390df03b3a575c0a473b41ee9ad4d3ea5952a126283be9bbd4e001c716ec6"} err="failed to get container status \"452390df03b3a575c0a473b41ee9ad4d3ea5952a126283be9bbd4e001c716ec6\": rpc error: code = NotFound desc = could not find container \"452390df03b3a575c0a473b41ee9ad4d3ea5952a126283be9bbd4e001c716ec6\": container with ID starting with 452390df03b3a575c0a473b41ee9ad4d3ea5952a126283be9bbd4e001c716ec6 not found: ID does not exist" Nov 24 13:42:28 crc kubenswrapper[5039]: I1124 13:42:28.319832 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3688350-42c7-4e8e-886d-e4d3c718221f" path="/var/lib/kubelet/pods/f3688350-42c7-4e8e-886d-e4d3c718221f/volumes" Nov 24 13:42:31 crc kubenswrapper[5039]: I1124 13:42:31.609953 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-t8k26" Nov 24 13:42:31 crc kubenswrapper[5039]: I1124 13:42:31.690354 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-t8k26" Nov 24 13:42:32 crc kubenswrapper[5039]: I1124 13:42:32.643899 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-t8k26"] Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.194335 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-t8k26" podUID="b9a81aab-1978-4938-84f9-3a5511942ecb" containerName="registry-server" containerID="cri-o://40ebf768cb21e9ca3ed53e2047726477d380fd74584603614b82c8e7c9a9fc0b" gracePeriod=2 Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.723315 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-t8k26" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.835055 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-c49v2"] Nov 24 13:42:33 crc kubenswrapper[5039]: E1124 13:42:33.836040 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3688350-42c7-4e8e-886d-e4d3c718221f" containerName="extract-utilities" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.836063 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3688350-42c7-4e8e-886d-e4d3c718221f" containerName="extract-utilities" Nov 24 13:42:33 crc kubenswrapper[5039]: E1124 13:42:33.836083 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9a81aab-1978-4938-84f9-3a5511942ecb" containerName="registry-server" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.836093 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9a81aab-1978-4938-84f9-3a5511942ecb" containerName="registry-server" Nov 24 13:42:33 crc kubenswrapper[5039]: E1124 13:42:33.836114 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9a81aab-1978-4938-84f9-3a5511942ecb" containerName="extract-content" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.836123 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9a81aab-1978-4938-84f9-3a5511942ecb" containerName="extract-content" Nov 24 13:42:33 crc kubenswrapper[5039]: E1124 13:42:33.836147 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc53aceb-f577-4036-88f5-76d8fe736cb0" containerName="mariadb-database-create" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.836155 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc53aceb-f577-4036-88f5-76d8fe736cb0" containerName="mariadb-database-create" Nov 24 13:42:33 crc kubenswrapper[5039]: E1124 13:42:33.836165 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="725180e7-824d-4133-ba16-8be24fa96cc9" containerName="mariadb-account-create" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.836173 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="725180e7-824d-4133-ba16-8be24fa96cc9" containerName="mariadb-account-create" Nov 24 13:42:33 crc kubenswrapper[5039]: E1124 13:42:33.836199 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9a81aab-1978-4938-84f9-3a5511942ecb" containerName="extract-utilities" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.836206 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9a81aab-1978-4938-84f9-3a5511942ecb" containerName="extract-utilities" Nov 24 13:42:33 crc kubenswrapper[5039]: E1124 13:42:33.836220 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3688350-42c7-4e8e-886d-e4d3c718221f" containerName="extract-content" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.836227 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3688350-42c7-4e8e-886d-e4d3c718221f" containerName="extract-content" Nov 24 13:42:33 crc kubenswrapper[5039]: E1124 13:42:33.836239 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3688350-42c7-4e8e-886d-e4d3c718221f" containerName="registry-server" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.836246 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3688350-42c7-4e8e-886d-e4d3c718221f" containerName="registry-server" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.836480 5039 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="f3688350-42c7-4e8e-886d-e4d3c718221f" containerName="registry-server" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.836527 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc53aceb-f577-4036-88f5-76d8fe736cb0" containerName="mariadb-database-create" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.836544 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="725180e7-824d-4133-ba16-8be24fa96cc9" containerName="mariadb-account-create" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.836561 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9a81aab-1978-4938-84f9-3a5511942ecb" containerName="registry-server" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.837432 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-c49v2" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.840289 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.842404 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.842548 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.842726 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-k6pb7" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.851034 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6grcg\" (UniqueName: \"kubernetes.io/projected/b9a81aab-1978-4938-84f9-3a5511942ecb-kube-api-access-6grcg\") pod \"b9a81aab-1978-4938-84f9-3a5511942ecb\" (UID: \"b9a81aab-1978-4938-84f9-3a5511942ecb\") " Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.851154 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9a81aab-1978-4938-84f9-3a5511942ecb-utilities\") pod \"b9a81aab-1978-4938-84f9-3a5511942ecb\" (UID: \"b9a81aab-1978-4938-84f9-3a5511942ecb\") " Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.851203 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9a81aab-1978-4938-84f9-3a5511942ecb-catalog-content\") pod \"b9a81aab-1978-4938-84f9-3a5511942ecb\" (UID: \"b9a81aab-1978-4938-84f9-3a5511942ecb\") " Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.852018 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9a81aab-1978-4938-84f9-3a5511942ecb-utilities" (OuterVolumeSpecName: "utilities") pod "b9a81aab-1978-4938-84f9-3a5511942ecb" (UID: "b9a81aab-1978-4938-84f9-3a5511942ecb"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.856228 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-c49v2"] Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.865976 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9a81aab-1978-4938-84f9-3a5511942ecb-kube-api-access-6grcg" (OuterVolumeSpecName: "kube-api-access-6grcg") pod "b9a81aab-1978-4938-84f9-3a5511942ecb" (UID: "b9a81aab-1978-4938-84f9-3a5511942ecb"). InnerVolumeSpecName "kube-api-access-6grcg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.908871 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9a81aab-1978-4938-84f9-3a5511942ecb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b9a81aab-1978-4938-84f9-3a5511942ecb" (UID: "b9a81aab-1978-4938-84f9-3a5511942ecb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.953794 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bbaa038-035d-4e44-ace2-4ac374ccc28a-combined-ca-bundle\") pod \"aodh-db-sync-c49v2\" (UID: \"0bbaa038-035d-4e44-ace2-4ac374ccc28a\") " pod="openstack/aodh-db-sync-c49v2" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.954010 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0bbaa038-035d-4e44-ace2-4ac374ccc28a-scripts\") pod \"aodh-db-sync-c49v2\" (UID: \"0bbaa038-035d-4e44-ace2-4ac374ccc28a\") " pod="openstack/aodh-db-sync-c49v2" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.954275 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8l72\" (UniqueName: \"kubernetes.io/projected/0bbaa038-035d-4e44-ace2-4ac374ccc28a-kube-api-access-v8l72\") pod \"aodh-db-sync-c49v2\" (UID: \"0bbaa038-035d-4e44-ace2-4ac374ccc28a\") " pod="openstack/aodh-db-sync-c49v2" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.954587 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bbaa038-035d-4e44-ace2-4ac374ccc28a-config-data\") pod \"aodh-db-sync-c49v2\" (UID: \"0bbaa038-035d-4e44-ace2-4ac374ccc28a\") " pod="openstack/aodh-db-sync-c49v2" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.954869 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9a81aab-1978-4938-84f9-3a5511942ecb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.954890 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6grcg\" (UniqueName: \"kubernetes.io/projected/b9a81aab-1978-4938-84f9-3a5511942ecb-kube-api-access-6grcg\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:33 crc kubenswrapper[5039]: I1124 13:42:33.954905 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9a81aab-1978-4938-84f9-3a5511942ecb-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.057138 5039 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8l72\" (UniqueName: \"kubernetes.io/projected/0bbaa038-035d-4e44-ace2-4ac374ccc28a-kube-api-access-v8l72\") pod \"aodh-db-sync-c49v2\" (UID: \"0bbaa038-035d-4e44-ace2-4ac374ccc28a\") " pod="openstack/aodh-db-sync-c49v2" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.057560 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bbaa038-035d-4e44-ace2-4ac374ccc28a-config-data\") pod \"aodh-db-sync-c49v2\" (UID: \"0bbaa038-035d-4e44-ace2-4ac374ccc28a\") " pod="openstack/aodh-db-sync-c49v2" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.057743 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bbaa038-035d-4e44-ace2-4ac374ccc28a-combined-ca-bundle\") pod \"aodh-db-sync-c49v2\" (UID: \"0bbaa038-035d-4e44-ace2-4ac374ccc28a\") " pod="openstack/aodh-db-sync-c49v2" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.058636 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0bbaa038-035d-4e44-ace2-4ac374ccc28a-scripts\") pod \"aodh-db-sync-c49v2\" (UID: \"0bbaa038-035d-4e44-ace2-4ac374ccc28a\") " pod="openstack/aodh-db-sync-c49v2" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.061765 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0bbaa038-035d-4e44-ace2-4ac374ccc28a-scripts\") pod \"aodh-db-sync-c49v2\" (UID: \"0bbaa038-035d-4e44-ace2-4ac374ccc28a\") " pod="openstack/aodh-db-sync-c49v2" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.062552 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bbaa038-035d-4e44-ace2-4ac374ccc28a-config-data\") pod \"aodh-db-sync-c49v2\" (UID: \"0bbaa038-035d-4e44-ace2-4ac374ccc28a\") " pod="openstack/aodh-db-sync-c49v2" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.067772 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bbaa038-035d-4e44-ace2-4ac374ccc28a-combined-ca-bundle\") pod \"aodh-db-sync-c49v2\" (UID: \"0bbaa038-035d-4e44-ace2-4ac374ccc28a\") " pod="openstack/aodh-db-sync-c49v2" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.078705 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8l72\" (UniqueName: \"kubernetes.io/projected/0bbaa038-035d-4e44-ace2-4ac374ccc28a-kube-api-access-v8l72\") pod \"aodh-db-sync-c49v2\" (UID: \"0bbaa038-035d-4e44-ace2-4ac374ccc28a\") " pod="openstack/aodh-db-sync-c49v2" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.204588 5039 generic.go:334] "Generic (PLEG): container finished" podID="b9a81aab-1978-4938-84f9-3a5511942ecb" containerID="40ebf768cb21e9ca3ed53e2047726477d380fd74584603614b82c8e7c9a9fc0b" exitCode=0 Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.204938 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t8k26" event={"ID":"b9a81aab-1978-4938-84f9-3a5511942ecb","Type":"ContainerDied","Data":"40ebf768cb21e9ca3ed53e2047726477d380fd74584603614b82c8e7c9a9fc0b"} Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.205049 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-t8k26" event={"ID":"b9a81aab-1978-4938-84f9-3a5511942ecb","Type":"ContainerDied","Data":"c5d276de16f8aa116c65efccc77b15c1b251c58e8359c612cb58abeda15b29c1"} Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.204982 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t8k26" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.205091 5039 scope.go:117] "RemoveContainer" containerID="40ebf768cb21e9ca3ed53e2047726477d380fd74584603614b82c8e7c9a9fc0b" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.213426 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-c49v2" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.245422 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-t8k26"] Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.252588 5039 scope.go:117] "RemoveContainer" containerID="006f0d90187e613602c5a4fd67e7298c66ddda83ed0f96974f35db44b9d4e266" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.254434 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-t8k26"] Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.319148 5039 scope.go:117] "RemoveContainer" containerID="60e5d4c578a56b52977bedad21f1c3c88e964cc1979fac5f1ff10b23aebf98dc" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.325308 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9a81aab-1978-4938-84f9-3a5511942ecb" path="/var/lib/kubelet/pods/b9a81aab-1978-4938-84f9-3a5511942ecb/volumes" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.411832 5039 scope.go:117] "RemoveContainer" containerID="40ebf768cb21e9ca3ed53e2047726477d380fd74584603614b82c8e7c9a9fc0b" Nov 24 13:42:34 crc kubenswrapper[5039]: E1124 13:42:34.420728 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"40ebf768cb21e9ca3ed53e2047726477d380fd74584603614b82c8e7c9a9fc0b\": container with ID starting with 40ebf768cb21e9ca3ed53e2047726477d380fd74584603614b82c8e7c9a9fc0b not found: ID does not exist" containerID="40ebf768cb21e9ca3ed53e2047726477d380fd74584603614b82c8e7c9a9fc0b" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.420766 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"40ebf768cb21e9ca3ed53e2047726477d380fd74584603614b82c8e7c9a9fc0b"} err="failed to get container status \"40ebf768cb21e9ca3ed53e2047726477d380fd74584603614b82c8e7c9a9fc0b\": rpc error: code = NotFound desc = could not find container \"40ebf768cb21e9ca3ed53e2047726477d380fd74584603614b82c8e7c9a9fc0b\": container with ID starting with 40ebf768cb21e9ca3ed53e2047726477d380fd74584603614b82c8e7c9a9fc0b not found: ID does not exist" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.420790 5039 scope.go:117] "RemoveContainer" containerID="006f0d90187e613602c5a4fd67e7298c66ddda83ed0f96974f35db44b9d4e266" Nov 24 13:42:34 crc kubenswrapper[5039]: E1124 13:42:34.427419 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"006f0d90187e613602c5a4fd67e7298c66ddda83ed0f96974f35db44b9d4e266\": container with ID starting with 006f0d90187e613602c5a4fd67e7298c66ddda83ed0f96974f35db44b9d4e266 not found: ID does not exist" 
containerID="006f0d90187e613602c5a4fd67e7298c66ddda83ed0f96974f35db44b9d4e266" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.427460 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"006f0d90187e613602c5a4fd67e7298c66ddda83ed0f96974f35db44b9d4e266"} err="failed to get container status \"006f0d90187e613602c5a4fd67e7298c66ddda83ed0f96974f35db44b9d4e266\": rpc error: code = NotFound desc = could not find container \"006f0d90187e613602c5a4fd67e7298c66ddda83ed0f96974f35db44b9d4e266\": container with ID starting with 006f0d90187e613602c5a4fd67e7298c66ddda83ed0f96974f35db44b9d4e266 not found: ID does not exist" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.427484 5039 scope.go:117] "RemoveContainer" containerID="60e5d4c578a56b52977bedad21f1c3c88e964cc1979fac5f1ff10b23aebf98dc" Nov 24 13:42:34 crc kubenswrapper[5039]: E1124 13:42:34.447584 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60e5d4c578a56b52977bedad21f1c3c88e964cc1979fac5f1ff10b23aebf98dc\": container with ID starting with 60e5d4c578a56b52977bedad21f1c3c88e964cc1979fac5f1ff10b23aebf98dc not found: ID does not exist" containerID="60e5d4c578a56b52977bedad21f1c3c88e964cc1979fac5f1ff10b23aebf98dc" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.447630 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60e5d4c578a56b52977bedad21f1c3c88e964cc1979fac5f1ff10b23aebf98dc"} err="failed to get container status \"60e5d4c578a56b52977bedad21f1c3c88e964cc1979fac5f1ff10b23aebf98dc\": rpc error: code = NotFound desc = could not find container \"60e5d4c578a56b52977bedad21f1c3c88e964cc1979fac5f1ff10b23aebf98dc\": container with ID starting with 60e5d4c578a56b52977bedad21f1c3c88e964cc1979fac5f1ff10b23aebf98dc not found: ID does not exist" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.521249 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 24 13:42:34 crc kubenswrapper[5039]: I1124 13:42:34.921181 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-c49v2"] Nov 24 13:42:34 crc kubenswrapper[5039]: W1124 13:42:34.922233 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0bbaa038_035d_4e44_ace2_4ac374ccc28a.slice/crio-ff0940924d3dce4581d11109e9a177ee1016e2b8675e4761ef0e1ef461e91c27 WatchSource:0}: Error finding container ff0940924d3dce4581d11109e9a177ee1016e2b8675e4761ef0e1ef461e91c27: Status 404 returned error can't find the container with id ff0940924d3dce4581d11109e9a177ee1016e2b8675e4761ef0e1ef461e91c27 Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.069765 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-dv6r5"] Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.071336 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-dv6r5" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.073208 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.073410 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.082853 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-dv6r5"] Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.183788 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5558fd75-638f-4d1e-b0d8-e8e071471415-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-dv6r5\" (UID: \"5558fd75-638f-4d1e-b0d8-e8e071471415\") " pod="openstack/nova-cell0-cell-mapping-dv6r5" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.183841 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgzsn\" (UniqueName: \"kubernetes.io/projected/5558fd75-638f-4d1e-b0d8-e8e071471415-kube-api-access-fgzsn\") pod \"nova-cell0-cell-mapping-dv6r5\" (UID: \"5558fd75-638f-4d1e-b0d8-e8e071471415\") " pod="openstack/nova-cell0-cell-mapping-dv6r5" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.183991 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5558fd75-638f-4d1e-b0d8-e8e071471415-scripts\") pod \"nova-cell0-cell-mapping-dv6r5\" (UID: \"5558fd75-638f-4d1e-b0d8-e8e071471415\") " pod="openstack/nova-cell0-cell-mapping-dv6r5" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.184162 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5558fd75-638f-4d1e-b0d8-e8e071471415-config-data\") pod \"nova-cell0-cell-mapping-dv6r5\" (UID: \"5558fd75-638f-4d1e-b0d8-e8e071471415\") " pod="openstack/nova-cell0-cell-mapping-dv6r5" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.221441 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-c49v2" event={"ID":"0bbaa038-035d-4e44-ace2-4ac374ccc28a","Type":"ContainerStarted","Data":"ff0940924d3dce4581d11109e9a177ee1016e2b8675e4761ef0e1ef461e91c27"} Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.254563 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.256383 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.259657 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.283411 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.286076 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5558fd75-638f-4d1e-b0d8-e8e071471415-config-data\") pod \"nova-cell0-cell-mapping-dv6r5\" (UID: \"5558fd75-638f-4d1e-b0d8-e8e071471415\") " pod="openstack/nova-cell0-cell-mapping-dv6r5" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.286331 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5558fd75-638f-4d1e-b0d8-e8e071471415-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-dv6r5\" (UID: \"5558fd75-638f-4d1e-b0d8-e8e071471415\") " pod="openstack/nova-cell0-cell-mapping-dv6r5" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.286382 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgzsn\" (UniqueName: \"kubernetes.io/projected/5558fd75-638f-4d1e-b0d8-e8e071471415-kube-api-access-fgzsn\") pod \"nova-cell0-cell-mapping-dv6r5\" (UID: \"5558fd75-638f-4d1e-b0d8-e8e071471415\") " pod="openstack/nova-cell0-cell-mapping-dv6r5" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.287345 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5558fd75-638f-4d1e-b0d8-e8e071471415-scripts\") pod \"nova-cell0-cell-mapping-dv6r5\" (UID: \"5558fd75-638f-4d1e-b0d8-e8e071471415\") " pod="openstack/nova-cell0-cell-mapping-dv6r5" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.317368 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5558fd75-638f-4d1e-b0d8-e8e071471415-config-data\") pod \"nova-cell0-cell-mapping-dv6r5\" (UID: \"5558fd75-638f-4d1e-b0d8-e8e071471415\") " pod="openstack/nova-cell0-cell-mapping-dv6r5" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.319690 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5558fd75-638f-4d1e-b0d8-e8e071471415-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-dv6r5\" (UID: \"5558fd75-638f-4d1e-b0d8-e8e071471415\") " pod="openstack/nova-cell0-cell-mapping-dv6r5" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.322051 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5558fd75-638f-4d1e-b0d8-e8e071471415-scripts\") pod \"nova-cell0-cell-mapping-dv6r5\" (UID: \"5558fd75-638f-4d1e-b0d8-e8e071471415\") " pod="openstack/nova-cell0-cell-mapping-dv6r5" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.330965 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgzsn\" (UniqueName: \"kubernetes.io/projected/5558fd75-638f-4d1e-b0d8-e8e071471415-kube-api-access-fgzsn\") pod \"nova-cell0-cell-mapping-dv6r5\" (UID: \"5558fd75-638f-4d1e-b0d8-e8e071471415\") " pod="openstack/nova-cell0-cell-mapping-dv6r5" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.363365 5039 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openstack/nova-metadata-0"] Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.365590 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.370306 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.390644 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40c70f05-786b-43b9-9bb5-87b6f7907cc1-config-data\") pod \"nova-api-0\" (UID: \"40c70f05-786b-43b9-9bb5-87b6f7907cc1\") " pod="openstack/nova-api-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.390738 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40c70f05-786b-43b9-9bb5-87b6f7907cc1-logs\") pod \"nova-api-0\" (UID: \"40c70f05-786b-43b9-9bb5-87b6f7907cc1\") " pod="openstack/nova-api-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.390894 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtwsl\" (UniqueName: \"kubernetes.io/projected/40c70f05-786b-43b9-9bb5-87b6f7907cc1-kube-api-access-wtwsl\") pod \"nova-api-0\" (UID: \"40c70f05-786b-43b9-9bb5-87b6f7907cc1\") " pod="openstack/nova-api-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.391042 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40c70f05-786b-43b9-9bb5-87b6f7907cc1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"40c70f05-786b-43b9-9bb5-87b6f7907cc1\") " pod="openstack/nova-api-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.403307 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.409282 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-dv6r5" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.493729 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.495540 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.495811 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtwsl\" (UniqueName: \"kubernetes.io/projected/40c70f05-786b-43b9-9bb5-87b6f7907cc1-kube-api-access-wtwsl\") pod \"nova-api-0\" (UID: \"40c70f05-786b-43b9-9bb5-87b6f7907cc1\") " pod="openstack/nova-api-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.495908 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40c70f05-786b-43b9-9bb5-87b6f7907cc1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"40c70f05-786b-43b9-9bb5-87b6f7907cc1\") " pod="openstack/nova-api-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.496007 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"bde5bbf2-c2e9-42cb-8db9-5077e364f69a\") " pod="openstack/nova-metadata-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.496102 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-logs\") pod \"nova-metadata-0\" (UID: \"bde5bbf2-c2e9-42cb-8db9-5077e364f69a\") " pod="openstack/nova-metadata-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.496171 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40c70f05-786b-43b9-9bb5-87b6f7907cc1-config-data\") pod \"nova-api-0\" (UID: \"40c70f05-786b-43b9-9bb5-87b6f7907cc1\") " pod="openstack/nova-api-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.496204 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40c70f05-786b-43b9-9bb5-87b6f7907cc1-logs\") pod \"nova-api-0\" (UID: \"40c70f05-786b-43b9-9bb5-87b6f7907cc1\") " pod="openstack/nova-api-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.496245 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q77t5\" (UniqueName: \"kubernetes.io/projected/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-kube-api-access-q77t5\") pod \"nova-metadata-0\" (UID: \"bde5bbf2-c2e9-42cb-8db9-5077e364f69a\") " pod="openstack/nova-metadata-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.496272 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-config-data\") pod \"nova-metadata-0\" (UID: \"bde5bbf2-c2e9-42cb-8db9-5077e364f69a\") " pod="openstack/nova-metadata-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.497572 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40c70f05-786b-43b9-9bb5-87b6f7907cc1-logs\") pod \"nova-api-0\" (UID: \"40c70f05-786b-43b9-9bb5-87b6f7907cc1\") " pod="openstack/nova-api-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.500758 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.502311 5039 util.go:30] "No sandbox for 
pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.507150 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.507333 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.528638 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40c70f05-786b-43b9-9bb5-87b6f7907cc1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"40c70f05-786b-43b9-9bb5-87b6f7907cc1\") " pod="openstack/nova-api-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.529456 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.531102 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtwsl\" (UniqueName: \"kubernetes.io/projected/40c70f05-786b-43b9-9bb5-87b6f7907cc1-kube-api-access-wtwsl\") pod \"nova-api-0\" (UID: \"40c70f05-786b-43b9-9bb5-87b6f7907cc1\") " pod="openstack/nova-api-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.539353 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40c70f05-786b-43b9-9bb5-87b6f7907cc1-config-data\") pod \"nova-api-0\" (UID: \"40c70f05-786b-43b9-9bb5-87b6f7907cc1\") " pod="openstack/nova-api-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.558287 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.584131 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.602518 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q77t5\" (UniqueName: \"kubernetes.io/projected/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-kube-api-access-q77t5\") pod \"nova-metadata-0\" (UID: \"bde5bbf2-c2e9-42cb-8db9-5077e364f69a\") " pod="openstack/nova-metadata-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.602568 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-config-data\") pod \"nova-metadata-0\" (UID: \"bde5bbf2-c2e9-42cb-8db9-5077e364f69a\") " pod="openstack/nova-metadata-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.602597 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/769a2230-2e35-4d97-94a2-5c9cfcf0e054-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"769a2230-2e35-4d97-94a2-5c9cfcf0e054\") " pod="openstack/nova-scheduler-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.602661 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/769a2230-2e35-4d97-94a2-5c9cfcf0e054-config-data\") pod \"nova-scheduler-0\" (UID: \"769a2230-2e35-4d97-94a2-5c9cfcf0e054\") " pod="openstack/nova-scheduler-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.602683 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e0d43dd-60a3-4898-b9ef-b2377a357dee-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"8e0d43dd-60a3-4898-b9ef-b2377a357dee\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.602722 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrg8z\" (UniqueName: \"kubernetes.io/projected/8e0d43dd-60a3-4898-b9ef-b2377a357dee-kube-api-access-mrg8z\") pod \"nova-cell1-novncproxy-0\" (UID: \"8e0d43dd-60a3-4898-b9ef-b2377a357dee\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.602753 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"bde5bbf2-c2e9-42cb-8db9-5077e364f69a\") " pod="openstack/nova-metadata-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.602776 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hcw2p\" (UniqueName: \"kubernetes.io/projected/769a2230-2e35-4d97-94a2-5c9cfcf0e054-kube-api-access-hcw2p\") pod \"nova-scheduler-0\" (UID: \"769a2230-2e35-4d97-94a2-5c9cfcf0e054\") " pod="openstack/nova-scheduler-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.602850 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-logs\") pod \"nova-metadata-0\" (UID: \"bde5bbf2-c2e9-42cb-8db9-5077e364f69a\") " pod="openstack/nova-metadata-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.602879 5039 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e0d43dd-60a3-4898-b9ef-b2377a357dee-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"8e0d43dd-60a3-4898-b9ef-b2377a357dee\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.606020 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-logs\") pod \"nova-metadata-0\" (UID: \"bde5bbf2-c2e9-42cb-8db9-5077e364f69a\") " pod="openstack/nova-metadata-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.615606 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"bde5bbf2-c2e9-42cb-8db9-5077e364f69a\") " pod="openstack/nova-metadata-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.616242 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-config-data\") pod \"nova-metadata-0\" (UID: \"bde5bbf2-c2e9-42cb-8db9-5077e364f69a\") " pod="openstack/nova-metadata-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.623062 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-tztwk"] Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.625338 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.636247 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q77t5\" (UniqueName: \"kubernetes.io/projected/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-kube-api-access-q77t5\") pod \"nova-metadata-0\" (UID: \"bde5bbf2-c2e9-42cb-8db9-5077e364f69a\") " pod="openstack/nova-metadata-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.701553 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-tztwk"] Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.714809 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e0d43dd-60a3-4898-b9ef-b2377a357dee-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"8e0d43dd-60a3-4898-b9ef-b2377a357dee\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.714887 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-tztwk\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.715024 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrg8z\" (UniqueName: \"kubernetes.io/projected/8e0d43dd-60a3-4898-b9ef-b2377a357dee-kube-api-access-mrg8z\") pod \"nova-cell1-novncproxy-0\" (UID: \"8e0d43dd-60a3-4898-b9ef-b2377a357dee\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.715104 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-tztwk\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.715156 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hcw2p\" (UniqueName: \"kubernetes.io/projected/769a2230-2e35-4d97-94a2-5c9cfcf0e054-kube-api-access-hcw2p\") pod \"nova-scheduler-0\" (UID: \"769a2230-2e35-4d97-94a2-5c9cfcf0e054\") " pod="openstack/nova-scheduler-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.715305 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-tztwk\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.715382 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-tztwk\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.715412 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-config\") pod \"dnsmasq-dns-5fbc4d444f-tztwk\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.715453 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e0d43dd-60a3-4898-b9ef-b2377a357dee-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"8e0d43dd-60a3-4898-b9ef-b2377a357dee\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.715573 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/769a2230-2e35-4d97-94a2-5c9cfcf0e054-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"769a2230-2e35-4d97-94a2-5c9cfcf0e054\") " pod="openstack/nova-scheduler-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.715596 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnf6s\" (UniqueName: \"kubernetes.io/projected/2b7e58ca-126f-4175-8e81-8311a1de04b4-kube-api-access-mnf6s\") pod \"dnsmasq-dns-5fbc4d444f-tztwk\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.715790 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/769a2230-2e35-4d97-94a2-5c9cfcf0e054-config-data\") pod \"nova-scheduler-0\" (UID: \"769a2230-2e35-4d97-94a2-5c9cfcf0e054\") " pod="openstack/nova-scheduler-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.723659 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/8e0d43dd-60a3-4898-b9ef-b2377a357dee-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"8e0d43dd-60a3-4898-b9ef-b2377a357dee\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.726148 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.726841 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e0d43dd-60a3-4898-b9ef-b2377a357dee-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"8e0d43dd-60a3-4898-b9ef-b2377a357dee\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.728108 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/769a2230-2e35-4d97-94a2-5c9cfcf0e054-config-data\") pod \"nova-scheduler-0\" (UID: \"769a2230-2e35-4d97-94a2-5c9cfcf0e054\") " pod="openstack/nova-scheduler-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.728248 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/769a2230-2e35-4d97-94a2-5c9cfcf0e054-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"769a2230-2e35-4d97-94a2-5c9cfcf0e054\") " pod="openstack/nova-scheduler-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.741326 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hcw2p\" (UniqueName: \"kubernetes.io/projected/769a2230-2e35-4d97-94a2-5c9cfcf0e054-kube-api-access-hcw2p\") pod \"nova-scheduler-0\" (UID: \"769a2230-2e35-4d97-94a2-5c9cfcf0e054\") " pod="openstack/nova-scheduler-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.747913 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrg8z\" (UniqueName: \"kubernetes.io/projected/8e0d43dd-60a3-4898-b9ef-b2377a357dee-kube-api-access-mrg8z\") pod \"nova-cell1-novncproxy-0\" (UID: \"8e0d43dd-60a3-4898-b9ef-b2377a357dee\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.760243 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.821950 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-tztwk\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.826872 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-tztwk\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.826941 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-tztwk\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.826974 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-config\") pod \"dnsmasq-dns-5fbc4d444f-tztwk\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.827092 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnf6s\" (UniqueName: \"kubernetes.io/projected/2b7e58ca-126f-4175-8e81-8311a1de04b4-kube-api-access-mnf6s\") pod \"dnsmasq-dns-5fbc4d444f-tztwk\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.827238 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-tztwk\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.822993 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-tztwk\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.828752 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-tztwk\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.830589 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-tztwk\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:35 crc 
kubenswrapper[5039]: I1124 13:42:35.832253 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-tztwk\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.836186 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-config\") pod \"dnsmasq-dns-5fbc4d444f-tztwk\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:35 crc kubenswrapper[5039]: I1124 13:42:35.854120 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnf6s\" (UniqueName: \"kubernetes.io/projected/2b7e58ca-126f-4175-8e81-8311a1de04b4-kube-api-access-mnf6s\") pod \"dnsmasq-dns-5fbc4d444f-tztwk\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:36 crc kubenswrapper[5039]: I1124 13:42:36.045778 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:42:36 crc kubenswrapper[5039]: I1124 13:42:36.071776 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:36 crc kubenswrapper[5039]: I1124 13:42:36.283585 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-dv6r5"] Nov 24 13:42:36 crc kubenswrapper[5039]: W1124 13:42:36.312697 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5558fd75_638f_4d1e_b0d8_e8e071471415.slice/crio-eabc9c79c701937b8c14c0fb535432b6c17fc3b940b0eb960b8744b566556654 WatchSource:0}: Error finding container eabc9c79c701937b8c14c0fb535432b6c17fc3b940b0eb960b8744b566556654: Status 404 returned error can't find the container with id eabc9c79c701937b8c14c0fb535432b6c17fc3b940b0eb960b8744b566556654 Nov 24 13:42:36 crc kubenswrapper[5039]: I1124 13:42:36.472922 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 13:42:36 crc kubenswrapper[5039]: I1124 13:42:36.944598 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 13:42:36 crc kubenswrapper[5039]: I1124 13:42:36.995869 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-tztwk"] Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.004463 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.088868 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-968s2"] Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.096346 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-968s2" Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.105558 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-968s2"] Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.105913 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.105951 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.181876 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d757ee4-2d14-4644-9323-955537ee639e-scripts\") pod \"nova-cell1-conductor-db-sync-968s2\" (UID: \"9d757ee4-2d14-4644-9323-955537ee639e\") " pod="openstack/nova-cell1-conductor-db-sync-968s2" Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.181919 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khtgz\" (UniqueName: \"kubernetes.io/projected/9d757ee4-2d14-4644-9323-955537ee639e-kube-api-access-khtgz\") pod \"nova-cell1-conductor-db-sync-968s2\" (UID: \"9d757ee4-2d14-4644-9323-955537ee639e\") " pod="openstack/nova-cell1-conductor-db-sync-968s2" Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.181973 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d757ee4-2d14-4644-9323-955537ee639e-config-data\") pod \"nova-cell1-conductor-db-sync-968s2\" (UID: \"9d757ee4-2d14-4644-9323-955537ee639e\") " pod="openstack/nova-cell1-conductor-db-sync-968s2" Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.182127 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d757ee4-2d14-4644-9323-955537ee639e-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-968s2\" (UID: \"9d757ee4-2d14-4644-9323-955537ee639e\") " pod="openstack/nova-cell1-conductor-db-sync-968s2" Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.223326 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.265066 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-dv6r5" event={"ID":"5558fd75-638f-4d1e-b0d8-e8e071471415","Type":"ContainerStarted","Data":"528e41cdbf10f445277ad9bbd7e0cee1cb39b9e0b270368cb046d67b713f70e2"} Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.265348 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-dv6r5" event={"ID":"5558fd75-638f-4d1e-b0d8-e8e071471415","Type":"ContainerStarted","Data":"eabc9c79c701937b8c14c0fb535432b6c17fc3b940b0eb960b8744b566556654"} Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.273088 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"40c70f05-786b-43b9-9bb5-87b6f7907cc1","Type":"ContainerStarted","Data":"c081b95bd24c19ea3ffac6d70980e28755994ecbf10c0cd51c96bfbad2d3eec9"} Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.284252 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"bde5bbf2-c2e9-42cb-8db9-5077e364f69a","Type":"ContainerStarted","Data":"ee00cfbf9fe968e9637cd0807e465777a5a0f7f9fb1b696999d2a75af1b54d33"} Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.286914 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d757ee4-2d14-4644-9323-955537ee639e-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-968s2\" (UID: \"9d757ee4-2d14-4644-9323-955537ee639e\") " pod="openstack/nova-cell1-conductor-db-sync-968s2" Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.287129 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d757ee4-2d14-4644-9323-955537ee639e-scripts\") pod \"nova-cell1-conductor-db-sync-968s2\" (UID: \"9d757ee4-2d14-4644-9323-955537ee639e\") " pod="openstack/nova-cell1-conductor-db-sync-968s2" Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.287167 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khtgz\" (UniqueName: \"kubernetes.io/projected/9d757ee4-2d14-4644-9323-955537ee639e-kube-api-access-khtgz\") pod \"nova-cell1-conductor-db-sync-968s2\" (UID: \"9d757ee4-2d14-4644-9323-955537ee639e\") " pod="openstack/nova-cell1-conductor-db-sync-968s2" Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.287227 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d757ee4-2d14-4644-9323-955537ee639e-config-data\") pod \"nova-cell1-conductor-db-sync-968s2\" (UID: \"9d757ee4-2d14-4644-9323-955537ee639e\") " pod="openstack/nova-cell1-conductor-db-sync-968s2" Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.291765 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" event={"ID":"2b7e58ca-126f-4175-8e81-8311a1de04b4","Type":"ContainerStarted","Data":"f9abc6cd4e5fe2cdb72b5744c49e753a52629d9d0366cf8caecccb5cae55d997"} Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.292486 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d757ee4-2d14-4644-9323-955537ee639e-scripts\") pod \"nova-cell1-conductor-db-sync-968s2\" (UID: \"9d757ee4-2d14-4644-9323-955537ee639e\") " pod="openstack/nova-cell1-conductor-db-sync-968s2" Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.293532 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d757ee4-2d14-4644-9323-955537ee639e-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-968s2\" (UID: \"9d757ee4-2d14-4644-9323-955537ee639e\") " pod="openstack/nova-cell1-conductor-db-sync-968s2" Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.293540 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d757ee4-2d14-4644-9323-955537ee639e-config-data\") pod \"nova-cell1-conductor-db-sync-968s2\" (UID: \"9d757ee4-2d14-4644-9323-955537ee639e\") " pod="openstack/nova-cell1-conductor-db-sync-968s2" Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.295124 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-dv6r5" podStartSLOduration=2.295109467 podStartE2EDuration="2.295109467s" podCreationTimestamp="2025-11-24 13:42:35 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:42:37.284697901 +0000 UTC m=+1469.723822421" watchObservedRunningTime="2025-11-24 13:42:37.295109467 +0000 UTC m=+1469.734233967" Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.305630 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"769a2230-2e35-4d97-94a2-5c9cfcf0e054","Type":"ContainerStarted","Data":"9815c02048f35b241f9fea9fb8e9be5c63e7218964e69c72cd1cc4910f768d64"} Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.314814 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khtgz\" (UniqueName: \"kubernetes.io/projected/9d757ee4-2d14-4644-9323-955537ee639e-kube-api-access-khtgz\") pod \"nova-cell1-conductor-db-sync-968s2\" (UID: \"9d757ee4-2d14-4644-9323-955537ee639e\") " pod="openstack/nova-cell1-conductor-db-sync-968s2" Nov 24 13:42:37 crc kubenswrapper[5039]: I1124 13:42:37.432794 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-968s2" Nov 24 13:42:38 crc kubenswrapper[5039]: I1124 13:42:38.054335 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-968s2"] Nov 24 13:42:38 crc kubenswrapper[5039]: I1124 13:42:38.331709 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-968s2" event={"ID":"9d757ee4-2d14-4644-9323-955537ee639e","Type":"ContainerStarted","Data":"32a7b0522186879cc19cf9f74893da13a841469614cfc5909198e542c31beb60"} Nov 24 13:42:38 crc kubenswrapper[5039]: I1124 13:42:38.341162 5039 generic.go:334] "Generic (PLEG): container finished" podID="2b7e58ca-126f-4175-8e81-8311a1de04b4" containerID="304b4dbf35849b99ad645a5fee388de877d819e3c2a42e78628f36290ca68d1d" exitCode=0 Nov 24 13:42:38 crc kubenswrapper[5039]: I1124 13:42:38.341278 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" event={"ID":"2b7e58ca-126f-4175-8e81-8311a1de04b4","Type":"ContainerDied","Data":"304b4dbf35849b99ad645a5fee388de877d819e3c2a42e78628f36290ca68d1d"} Nov 24 13:42:38 crc kubenswrapper[5039]: I1124 13:42:38.344236 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"8e0d43dd-60a3-4898-b9ef-b2377a357dee","Type":"ContainerStarted","Data":"7447c540250dd333b391d415e1dcde27777e50eb15c922d48a763cba4cd242a2"} Nov 24 13:42:39 crc kubenswrapper[5039]: I1124 13:42:39.367312 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-968s2" event={"ID":"9d757ee4-2d14-4644-9323-955537ee639e","Type":"ContainerStarted","Data":"b4d85e4d184e4998db46db0b1205a979b16c3d967c5396ba0647aaf389585711"} Nov 24 13:42:39 crc kubenswrapper[5039]: I1124 13:42:39.375459 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" event={"ID":"2b7e58ca-126f-4175-8e81-8311a1de04b4","Type":"ContainerStarted","Data":"5f8fdd7de7be0c4c0d3edbfc3db5dfdac02d2a635b401e8ecfe235dd8f121017"} Nov 24 13:42:39 crc kubenswrapper[5039]: I1124 13:42:39.375707 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:39 crc kubenswrapper[5039]: I1124 13:42:39.418323 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-968s2" 
podStartSLOduration=2.418299062 podStartE2EDuration="2.418299062s" podCreationTimestamp="2025-11-24 13:42:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:42:39.395038098 +0000 UTC m=+1471.834162598" watchObservedRunningTime="2025-11-24 13:42:39.418299062 +0000 UTC m=+1471.857423572" Nov 24 13:42:39 crc kubenswrapper[5039]: I1124 13:42:39.507712 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" podStartSLOduration=4.5076751040000005 podStartE2EDuration="4.507675104s" podCreationTimestamp="2025-11-24 13:42:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:42:39.450460484 +0000 UTC m=+1471.889584984" watchObservedRunningTime="2025-11-24 13:42:39.507675104 +0000 UTC m=+1471.946799604" Nov 24 13:42:39 crc kubenswrapper[5039]: I1124 13:42:39.824312 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 13:42:39 crc kubenswrapper[5039]: I1124 13:42:39.835841 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.464223 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"8e0d43dd-60a3-4898-b9ef-b2377a357dee","Type":"ContainerStarted","Data":"e7a7ba5c49f56245d5f301800daf3e5f4976fdcf6558b6a705ef6b177a1a7214"} Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.464293 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="8e0d43dd-60a3-4898-b9ef-b2377a357dee" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://e7a7ba5c49f56245d5f301800daf3e5f4976fdcf6558b6a705ef6b177a1a7214" gracePeriod=30 Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.467964 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-c49v2" event={"ID":"0bbaa038-035d-4e44-ace2-4ac374ccc28a","Type":"ContainerStarted","Data":"807e0bfefe975cd3524f0f960ea593d92b4f19f497a08465800071116473344b"} Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.472263 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"40c70f05-786b-43b9-9bb5-87b6f7907cc1","Type":"ContainerStarted","Data":"3e8ad326f0e5aa69207aebcad58d3d771092003f9982ff1ef4cf628a7d8d46a7"} Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.472330 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"40c70f05-786b-43b9-9bb5-87b6f7907cc1","Type":"ContainerStarted","Data":"8a5598eb8354cf4b2962d73b351825d07554d321089648b8edee6aa50e8516c8"} Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.478426 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bde5bbf2-c2e9-42cb-8db9-5077e364f69a","Type":"ContainerStarted","Data":"cd32a0ecfe9e3a7099960a08000d88ead31c6cb21106549d214660e44c7e6ded"} Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.478481 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="bde5bbf2-c2e9-42cb-8db9-5077e364f69a" containerName="nova-metadata-log" containerID="cri-o://8bf1ee6d5ccd8613e701229d2aa1bede8ea6165f5b0702fb6246fc15fb617810" gracePeriod=30 Nov 24 13:42:45 crc kubenswrapper[5039]: 
I1124 13:42:45.478565 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bde5bbf2-c2e9-42cb-8db9-5077e364f69a","Type":"ContainerStarted","Data":"8bf1ee6d5ccd8613e701229d2aa1bede8ea6165f5b0702fb6246fc15fb617810"} Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.478606 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="bde5bbf2-c2e9-42cb-8db9-5077e364f69a" containerName="nova-metadata-metadata" containerID="cri-o://cd32a0ecfe9e3a7099960a08000d88ead31c6cb21106549d214660e44c7e6ded" gracePeriod=30 Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.490461 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"769a2230-2e35-4d97-94a2-5c9cfcf0e054","Type":"ContainerStarted","Data":"8e3dfc9d1f8f4bbbf46e789329a83cd07a146c178c8603dc743670c0b0926b7a"} Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.490899 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.58825195 podStartE2EDuration="10.490880447s" podCreationTimestamp="2025-11-24 13:42:35 +0000 UTC" firstStartedPulling="2025-11-24 13:42:37.26396888 +0000 UTC m=+1469.703093380" lastFinishedPulling="2025-11-24 13:42:44.166597377 +0000 UTC m=+1476.605721877" observedRunningTime="2025-11-24 13:42:45.483833093 +0000 UTC m=+1477.922957603" watchObservedRunningTime="2025-11-24 13:42:45.490880447 +0000 UTC m=+1477.930004957" Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.508459 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-c49v2" podStartSLOduration=3.192119279 podStartE2EDuration="12.508441829s" podCreationTimestamp="2025-11-24 13:42:33 +0000 UTC" firstStartedPulling="2025-11-24 13:42:34.925691116 +0000 UTC m=+1467.364815616" lastFinishedPulling="2025-11-24 13:42:44.242013666 +0000 UTC m=+1476.681138166" observedRunningTime="2025-11-24 13:42:45.502856441 +0000 UTC m=+1477.941980951" watchObservedRunningTime="2025-11-24 13:42:45.508441829 +0000 UTC m=+1477.947566339" Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.530765 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.8386592 podStartE2EDuration="10.530744819s" podCreationTimestamp="2025-11-24 13:42:35 +0000 UTC" firstStartedPulling="2025-11-24 13:42:36.474490378 +0000 UTC m=+1468.913614888" lastFinishedPulling="2025-11-24 13:42:44.166576007 +0000 UTC m=+1476.605700507" observedRunningTime="2025-11-24 13:42:45.5218467 +0000 UTC m=+1477.960971210" watchObservedRunningTime="2025-11-24 13:42:45.530744819 +0000 UTC m=+1477.969869329" Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.544969 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.324617665 podStartE2EDuration="10.5449526s" podCreationTimestamp="2025-11-24 13:42:35 +0000 UTC" firstStartedPulling="2025-11-24 13:42:36.980451295 +0000 UTC m=+1469.419575795" lastFinishedPulling="2025-11-24 13:42:44.20078623 +0000 UTC m=+1476.639910730" observedRunningTime="2025-11-24 13:42:45.538985182 +0000 UTC m=+1477.978109682" watchObservedRunningTime="2025-11-24 13:42:45.5449526 +0000 UTC m=+1477.984077100" Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.568902 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" 
podStartSLOduration=3.382173572 podStartE2EDuration="10.568885019s" podCreationTimestamp="2025-11-24 13:42:35 +0000 UTC" firstStartedPulling="2025-11-24 13:42:36.97986256 +0000 UTC m=+1469.418987060" lastFinishedPulling="2025-11-24 13:42:44.166574007 +0000 UTC m=+1476.605698507" observedRunningTime="2025-11-24 13:42:45.56285785 +0000 UTC m=+1478.001982350" watchObservedRunningTime="2025-11-24 13:42:45.568885019 +0000 UTC m=+1478.008009519" Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.585682 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.585723 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.726840 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.726971 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.760940 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.761097 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 24 13:42:45 crc kubenswrapper[5039]: I1124 13:42:45.795441 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 24 13:42:46 crc kubenswrapper[5039]: I1124 13:42:46.046518 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:42:46 crc kubenswrapper[5039]: I1124 13:42:46.073622 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:42:46 crc kubenswrapper[5039]: I1124 13:42:46.157343 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-sfp6r"] Nov 24 13:42:46 crc kubenswrapper[5039]: I1124 13:42:46.157640 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" podUID="34b45a67-b6e3-40cb-ad22-52fc9e26292e" containerName="dnsmasq-dns" containerID="cri-o://c756125ac99fa60a53a8cc12394dc3de2a94ea95034fd695efb28614da375b93" gracePeriod=10 Nov 24 13:42:46 crc kubenswrapper[5039]: I1124 13:42:46.578308 5039 generic.go:334] "Generic (PLEG): container finished" podID="bde5bbf2-c2e9-42cb-8db9-5077e364f69a" containerID="cd32a0ecfe9e3a7099960a08000d88ead31c6cb21106549d214660e44c7e6ded" exitCode=0 Nov 24 13:42:46 crc kubenswrapper[5039]: I1124 13:42:46.578610 5039 generic.go:334] "Generic (PLEG): container finished" podID="bde5bbf2-c2e9-42cb-8db9-5077e364f69a" containerID="8bf1ee6d5ccd8613e701229d2aa1bede8ea6165f5b0702fb6246fc15fb617810" exitCode=143 Nov 24 13:42:46 crc kubenswrapper[5039]: I1124 13:42:46.578639 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bde5bbf2-c2e9-42cb-8db9-5077e364f69a","Type":"ContainerDied","Data":"cd32a0ecfe9e3a7099960a08000d88ead31c6cb21106549d214660e44c7e6ded"} Nov 24 13:42:46 crc kubenswrapper[5039]: I1124 13:42:46.578685 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"bde5bbf2-c2e9-42cb-8db9-5077e364f69a","Type":"ContainerDied","Data":"8bf1ee6d5ccd8613e701229d2aa1bede8ea6165f5b0702fb6246fc15fb617810"} Nov 24 13:42:46 crc kubenswrapper[5039]: I1124 13:42:46.594252 5039 generic.go:334] "Generic (PLEG): container finished" podID="34b45a67-b6e3-40cb-ad22-52fc9e26292e" containerID="c756125ac99fa60a53a8cc12394dc3de2a94ea95034fd695efb28614da375b93" exitCode=0 Nov 24 13:42:46 crc kubenswrapper[5039]: I1124 13:42:46.594343 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" event={"ID":"34b45a67-b6e3-40cb-ad22-52fc9e26292e","Type":"ContainerDied","Data":"c756125ac99fa60a53a8cc12394dc3de2a94ea95034fd695efb28614da375b93"} Nov 24 13:42:46 crc kubenswrapper[5039]: I1124 13:42:46.634183 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 24 13:42:46 crc kubenswrapper[5039]: I1124 13:42:46.681493 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="40c70f05-786b-43b9-9bb5-87b6f7907cc1" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.225:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 24 13:42:46 crc kubenswrapper[5039]: I1124 13:42:46.693705 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="40c70f05-786b-43b9-9bb5-87b6f7907cc1" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.225:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 24 13:42:46 crc kubenswrapper[5039]: I1124 13:42:46.893657 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 13:42:46 crc kubenswrapper[5039]: I1124 13:42:46.910681 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.025092 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-dns-svc\") pod \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.025141 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-ovsdbserver-sb\") pod \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.025243 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-config\") pod \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.025325 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-dns-swift-storage-0\") pod \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.025370 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-combined-ca-bundle\") pod \"bde5bbf2-c2e9-42cb-8db9-5077e364f69a\" (UID: \"bde5bbf2-c2e9-42cb-8db9-5077e364f69a\") " Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.025387 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4srnb\" (UniqueName: \"kubernetes.io/projected/34b45a67-b6e3-40cb-ad22-52fc9e26292e-kube-api-access-4srnb\") pod \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.025401 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-ovsdbserver-nb\") pod \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\" (UID: \"34b45a67-b6e3-40cb-ad22-52fc9e26292e\") " Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.025437 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-config-data\") pod \"bde5bbf2-c2e9-42cb-8db9-5077e364f69a\" (UID: \"bde5bbf2-c2e9-42cb-8db9-5077e364f69a\") " Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.025537 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-logs\") pod \"bde5bbf2-c2e9-42cb-8db9-5077e364f69a\" (UID: \"bde5bbf2-c2e9-42cb-8db9-5077e364f69a\") " Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.025563 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q77t5\" (UniqueName: \"kubernetes.io/projected/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-kube-api-access-q77t5\") pod \"bde5bbf2-c2e9-42cb-8db9-5077e364f69a\" (UID: 
\"bde5bbf2-c2e9-42cb-8db9-5077e364f69a\") " Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.031362 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-logs" (OuterVolumeSpecName: "logs") pod "bde5bbf2-c2e9-42cb-8db9-5077e364f69a" (UID: "bde5bbf2-c2e9-42cb-8db9-5077e364f69a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.055457 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34b45a67-b6e3-40cb-ad22-52fc9e26292e-kube-api-access-4srnb" (OuterVolumeSpecName: "kube-api-access-4srnb") pod "34b45a67-b6e3-40cb-ad22-52fc9e26292e" (UID: "34b45a67-b6e3-40cb-ad22-52fc9e26292e"). InnerVolumeSpecName "kube-api-access-4srnb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.074745 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bde5bbf2-c2e9-42cb-8db9-5077e364f69a" (UID: "bde5bbf2-c2e9-42cb-8db9-5077e364f69a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.086187 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-config-data" (OuterVolumeSpecName: "config-data") pod "bde5bbf2-c2e9-42cb-8db9-5077e364f69a" (UID: "bde5bbf2-c2e9-42cb-8db9-5077e364f69a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.097175 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-kube-api-access-q77t5" (OuterVolumeSpecName: "kube-api-access-q77t5") pod "bde5bbf2-c2e9-42cb-8db9-5077e364f69a" (UID: "bde5bbf2-c2e9-42cb-8db9-5077e364f69a"). InnerVolumeSpecName "kube-api-access-q77t5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.127933 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.127968 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4srnb\" (UniqueName: \"kubernetes.io/projected/34b45a67-b6e3-40cb-ad22-52fc9e26292e-kube-api-access-4srnb\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.127981 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.127989 5039 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-logs\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.127997 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q77t5\" (UniqueName: \"kubernetes.io/projected/bde5bbf2-c2e9-42cb-8db9-5077e364f69a-kube-api-access-q77t5\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.173582 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "34b45a67-b6e3-40cb-ad22-52fc9e26292e" (UID: "34b45a67-b6e3-40cb-ad22-52fc9e26292e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.195148 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-config" (OuterVolumeSpecName: "config") pod "34b45a67-b6e3-40cb-ad22-52fc9e26292e" (UID: "34b45a67-b6e3-40cb-ad22-52fc9e26292e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.200887 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "34b45a67-b6e3-40cb-ad22-52fc9e26292e" (UID: "34b45a67-b6e3-40cb-ad22-52fc9e26292e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.204735 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "34b45a67-b6e3-40cb-ad22-52fc9e26292e" (UID: "34b45a67-b6e3-40cb-ad22-52fc9e26292e"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.206092 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "34b45a67-b6e3-40cb-ad22-52fc9e26292e" (UID: "34b45a67-b6e3-40cb-ad22-52fc9e26292e"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.230460 5039 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.230605 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.230624 5039 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.230636 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.230649 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34b45a67-b6e3-40cb-ad22-52fc9e26292e-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.607893 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" event={"ID":"34b45a67-b6e3-40cb-ad22-52fc9e26292e","Type":"ContainerDied","Data":"473ea016fe3b210d1934ec388f19d86f671755386615b7fca8303f0654276c84"} Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.607923 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-sfp6r" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.607968 5039 scope.go:117] "RemoveContainer" containerID="c756125ac99fa60a53a8cc12394dc3de2a94ea95034fd695efb28614da375b93" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.610199 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bde5bbf2-c2e9-42cb-8db9-5077e364f69a","Type":"ContainerDied","Data":"ee00cfbf9fe968e9637cd0807e465777a5a0f7f9fb1b696999d2a75af1b54d33"} Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.610345 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.638236 5039 scope.go:117] "RemoveContainer" containerID="e6cf42989e392131e09aace2b212e20e38fffa2c02da6271ff4e65c554a88e2d" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.713043 5039 scope.go:117] "RemoveContainer" containerID="cd32a0ecfe9e3a7099960a08000d88ead31c6cb21106549d214660e44c7e6ded" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.719058 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.734028 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.752069 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 24 13:42:47 crc kubenswrapper[5039]: E1124 13:42:47.752663 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34b45a67-b6e3-40cb-ad22-52fc9e26292e" containerName="dnsmasq-dns" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.752688 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="34b45a67-b6e3-40cb-ad22-52fc9e26292e" containerName="dnsmasq-dns" Nov 24 13:42:47 crc kubenswrapper[5039]: E1124 13:42:47.752713 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34b45a67-b6e3-40cb-ad22-52fc9e26292e" containerName="init" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.752723 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="34b45a67-b6e3-40cb-ad22-52fc9e26292e" containerName="init" Nov 24 13:42:47 crc kubenswrapper[5039]: E1124 13:42:47.752745 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bde5bbf2-c2e9-42cb-8db9-5077e364f69a" containerName="nova-metadata-metadata" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.752753 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="bde5bbf2-c2e9-42cb-8db9-5077e364f69a" containerName="nova-metadata-metadata" Nov 24 13:42:47 crc kubenswrapper[5039]: E1124 13:42:47.752794 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bde5bbf2-c2e9-42cb-8db9-5077e364f69a" containerName="nova-metadata-log" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.752804 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="bde5bbf2-c2e9-42cb-8db9-5077e364f69a" containerName="nova-metadata-log" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.753051 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="bde5bbf2-c2e9-42cb-8db9-5077e364f69a" containerName="nova-metadata-log" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.753076 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="bde5bbf2-c2e9-42cb-8db9-5077e364f69a" containerName="nova-metadata-metadata" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.753091 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="34b45a67-b6e3-40cb-ad22-52fc9e26292e" containerName="dnsmasq-dns" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.754493 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.760338 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.760958 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.763470 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-sfp6r"] Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.776038 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-sfp6r"] Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.790554 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.828516 5039 scope.go:117] "RemoveContainer" containerID="8bf1ee6d5ccd8613e701229d2aa1bede8ea6165f5b0702fb6246fc15fb617810" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.844862 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-config-data\") pod \"nova-metadata-0\" (UID: \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\") " pod="openstack/nova-metadata-0" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.845212 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-logs\") pod \"nova-metadata-0\" (UID: \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\") " pod="openstack/nova-metadata-0" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.845279 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\") " pod="openstack/nova-metadata-0" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.845383 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\") " pod="openstack/nova-metadata-0" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.845614 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkmb4\" (UniqueName: \"kubernetes.io/projected/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-kube-api-access-xkmb4\") pod \"nova-metadata-0\" (UID: \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\") " pod="openstack/nova-metadata-0" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.947461 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-logs\") pod \"nova-metadata-0\" (UID: \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\") " pod="openstack/nova-metadata-0" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.947799 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\") " pod="openstack/nova-metadata-0" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.947950 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\") " pod="openstack/nova-metadata-0" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.948044 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-logs\") pod \"nova-metadata-0\" (UID: \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\") " pod="openstack/nova-metadata-0" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.948194 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkmb4\" (UniqueName: \"kubernetes.io/projected/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-kube-api-access-xkmb4\") pod \"nova-metadata-0\" (UID: \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\") " pod="openstack/nova-metadata-0" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.948279 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-config-data\") pod \"nova-metadata-0\" (UID: \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\") " pod="openstack/nova-metadata-0" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.951823 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\") " pod="openstack/nova-metadata-0" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.954039 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-config-data\") pod \"nova-metadata-0\" (UID: \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\") " pod="openstack/nova-metadata-0" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.955420 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\") " pod="openstack/nova-metadata-0" Nov 24 13:42:47 crc kubenswrapper[5039]: I1124 13:42:47.965329 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkmb4\" (UniqueName: \"kubernetes.io/projected/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-kube-api-access-xkmb4\") pod \"nova-metadata-0\" (UID: \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\") " pod="openstack/nova-metadata-0" Nov 24 13:42:48 crc kubenswrapper[5039]: I1124 13:42:48.130363 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 13:42:48 crc kubenswrapper[5039]: I1124 13:42:48.320779 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34b45a67-b6e3-40cb-ad22-52fc9e26292e" path="/var/lib/kubelet/pods/34b45a67-b6e3-40cb-ad22-52fc9e26292e/volumes" Nov 24 13:42:48 crc kubenswrapper[5039]: I1124 13:42:48.322045 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bde5bbf2-c2e9-42cb-8db9-5077e364f69a" path="/var/lib/kubelet/pods/bde5bbf2-c2e9-42cb-8db9-5077e364f69a/volumes" Nov 24 13:42:48 crc kubenswrapper[5039]: I1124 13:42:48.617833 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 13:42:48 crc kubenswrapper[5039]: W1124 13:42:48.624836 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2859d6eb_5fec_4b3a_a6b0_abfe7cb6c380.slice/crio-d80ccbcfa6e80d45a2e2c3365478aa053ba6dbd556a3500c5760aff6180c9725 WatchSource:0}: Error finding container d80ccbcfa6e80d45a2e2c3365478aa053ba6dbd556a3500c5760aff6180c9725: Status 404 returned error can't find the container with id d80ccbcfa6e80d45a2e2c3365478aa053ba6dbd556a3500c5760aff6180c9725 Nov 24 13:42:48 crc kubenswrapper[5039]: I1124 13:42:48.626100 5039 generic.go:334] "Generic (PLEG): container finished" podID="5558fd75-638f-4d1e-b0d8-e8e071471415" containerID="528e41cdbf10f445277ad9bbd7e0cee1cb39b9e0b270368cb046d67b713f70e2" exitCode=0 Nov 24 13:42:48 crc kubenswrapper[5039]: I1124 13:42:48.626175 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-dv6r5" event={"ID":"5558fd75-638f-4d1e-b0d8-e8e071471415","Type":"ContainerDied","Data":"528e41cdbf10f445277ad9bbd7e0cee1cb39b9e0b270368cb046d67b713f70e2"} Nov 24 13:42:48 crc kubenswrapper[5039]: I1124 13:42:48.632833 5039 generic.go:334] "Generic (PLEG): container finished" podID="0bbaa038-035d-4e44-ace2-4ac374ccc28a" containerID="807e0bfefe975cd3524f0f960ea593d92b4f19f497a08465800071116473344b" exitCode=0 Nov 24 13:42:48 crc kubenswrapper[5039]: I1124 13:42:48.632985 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-c49v2" event={"ID":"0bbaa038-035d-4e44-ace2-4ac374ccc28a","Type":"ContainerDied","Data":"807e0bfefe975cd3524f0f960ea593d92b4f19f497a08465800071116473344b"} Nov 24 13:42:49 crc kubenswrapper[5039]: I1124 13:42:49.644954 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380","Type":"ContainerStarted","Data":"c493f3f049f78f6b76d82d248365f32ed985ec6374926f81fbe4bc67944edc88"} Nov 24 13:42:49 crc kubenswrapper[5039]: I1124 13:42:49.645293 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380","Type":"ContainerStarted","Data":"ad6b98286d7cdb8262fa01e00f242c8aa51cb716832ad4b510cb5ed2850f49e7"} Nov 24 13:42:49 crc kubenswrapper[5039]: I1124 13:42:49.645310 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380","Type":"ContainerStarted","Data":"d80ccbcfa6e80d45a2e2c3365478aa053ba6dbd556a3500c5760aff6180c9725"} Nov 24 13:42:49 crc kubenswrapper[5039]: I1124 13:42:49.668227 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.668208324 podStartE2EDuration="2.668208324s" podCreationTimestamp="2025-11-24 
13:42:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:42:49.661839987 +0000 UTC m=+1482.100964507" watchObservedRunningTime="2025-11-24 13:42:49.668208324 +0000 UTC m=+1482.107332824" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.101786 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.101839 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.180844 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-c49v2" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.189462 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-dv6r5" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.301778 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bbaa038-035d-4e44-ace2-4ac374ccc28a-combined-ca-bundle\") pod \"0bbaa038-035d-4e44-ace2-4ac374ccc28a\" (UID: \"0bbaa038-035d-4e44-ace2-4ac374ccc28a\") " Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.301873 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0bbaa038-035d-4e44-ace2-4ac374ccc28a-scripts\") pod \"0bbaa038-035d-4e44-ace2-4ac374ccc28a\" (UID: \"0bbaa038-035d-4e44-ace2-4ac374ccc28a\") " Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.302033 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5558fd75-638f-4d1e-b0d8-e8e071471415-config-data\") pod \"5558fd75-638f-4d1e-b0d8-e8e071471415\" (UID: \"5558fd75-638f-4d1e-b0d8-e8e071471415\") " Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.302109 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bbaa038-035d-4e44-ace2-4ac374ccc28a-config-data\") pod \"0bbaa038-035d-4e44-ace2-4ac374ccc28a\" (UID: \"0bbaa038-035d-4e44-ace2-4ac374ccc28a\") " Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.302221 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5558fd75-638f-4d1e-b0d8-e8e071471415-combined-ca-bundle\") pod \"5558fd75-638f-4d1e-b0d8-e8e071471415\" (UID: \"5558fd75-638f-4d1e-b0d8-e8e071471415\") " Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.302291 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v8l72\" (UniqueName: \"kubernetes.io/projected/0bbaa038-035d-4e44-ace2-4ac374ccc28a-kube-api-access-v8l72\") pod \"0bbaa038-035d-4e44-ace2-4ac374ccc28a\" (UID: \"0bbaa038-035d-4e44-ace2-4ac374ccc28a\") " Nov 24 13:42:50 crc 
kubenswrapper[5039]: I1124 13:42:50.302351 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fgzsn\" (UniqueName: \"kubernetes.io/projected/5558fd75-638f-4d1e-b0d8-e8e071471415-kube-api-access-fgzsn\") pod \"5558fd75-638f-4d1e-b0d8-e8e071471415\" (UID: \"5558fd75-638f-4d1e-b0d8-e8e071471415\") " Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.302392 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5558fd75-638f-4d1e-b0d8-e8e071471415-scripts\") pod \"5558fd75-638f-4d1e-b0d8-e8e071471415\" (UID: \"5558fd75-638f-4d1e-b0d8-e8e071471415\") " Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.308555 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bbaa038-035d-4e44-ace2-4ac374ccc28a-scripts" (OuterVolumeSpecName: "scripts") pod "0bbaa038-035d-4e44-ace2-4ac374ccc28a" (UID: "0bbaa038-035d-4e44-ace2-4ac374ccc28a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.308960 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0bbaa038-035d-4e44-ace2-4ac374ccc28a-kube-api-access-v8l72" (OuterVolumeSpecName: "kube-api-access-v8l72") pod "0bbaa038-035d-4e44-ace2-4ac374ccc28a" (UID: "0bbaa038-035d-4e44-ace2-4ac374ccc28a"). InnerVolumeSpecName "kube-api-access-v8l72". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.310691 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5558fd75-638f-4d1e-b0d8-e8e071471415-kube-api-access-fgzsn" (OuterVolumeSpecName: "kube-api-access-fgzsn") pod "5558fd75-638f-4d1e-b0d8-e8e071471415" (UID: "5558fd75-638f-4d1e-b0d8-e8e071471415"). InnerVolumeSpecName "kube-api-access-fgzsn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.311999 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5558fd75-638f-4d1e-b0d8-e8e071471415-scripts" (OuterVolumeSpecName: "scripts") pod "5558fd75-638f-4d1e-b0d8-e8e071471415" (UID: "5558fd75-638f-4d1e-b0d8-e8e071471415"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.333292 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5558fd75-638f-4d1e-b0d8-e8e071471415-config-data" (OuterVolumeSpecName: "config-data") pod "5558fd75-638f-4d1e-b0d8-e8e071471415" (UID: "5558fd75-638f-4d1e-b0d8-e8e071471415"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.336707 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bbaa038-035d-4e44-ace2-4ac374ccc28a-config-data" (OuterVolumeSpecName: "config-data") pod "0bbaa038-035d-4e44-ace2-4ac374ccc28a" (UID: "0bbaa038-035d-4e44-ace2-4ac374ccc28a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.342334 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bbaa038-035d-4e44-ace2-4ac374ccc28a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0bbaa038-035d-4e44-ace2-4ac374ccc28a" (UID: "0bbaa038-035d-4e44-ace2-4ac374ccc28a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.351842 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5558fd75-638f-4d1e-b0d8-e8e071471415-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5558fd75-638f-4d1e-b0d8-e8e071471415" (UID: "5558fd75-638f-4d1e-b0d8-e8e071471415"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.404978 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bbaa038-035d-4e44-ace2-4ac374ccc28a-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.405252 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5558fd75-638f-4d1e-b0d8-e8e071471415-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.405375 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v8l72\" (UniqueName: \"kubernetes.io/projected/0bbaa038-035d-4e44-ace2-4ac374ccc28a-kube-api-access-v8l72\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.405548 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fgzsn\" (UniqueName: \"kubernetes.io/projected/5558fd75-638f-4d1e-b0d8-e8e071471415-kube-api-access-fgzsn\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.405698 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5558fd75-638f-4d1e-b0d8-e8e071471415-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.405823 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bbaa038-035d-4e44-ace2-4ac374ccc28a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.405966 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0bbaa038-035d-4e44-ace2-4ac374ccc28a-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.406107 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5558fd75-638f-4d1e-b0d8-e8e071471415-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.656430 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-dv6r5" event={"ID":"5558fd75-638f-4d1e-b0d8-e8e071471415","Type":"ContainerDied","Data":"eabc9c79c701937b8c14c0fb535432b6c17fc3b940b0eb960b8744b566556654"} Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.656480 5039 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="eabc9c79c701937b8c14c0fb535432b6c17fc3b940b0eb960b8744b566556654" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.656582 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-dv6r5" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.659331 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-c49v2" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.659339 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-c49v2" event={"ID":"0bbaa038-035d-4e44-ace2-4ac374ccc28a","Type":"ContainerDied","Data":"ff0940924d3dce4581d11109e9a177ee1016e2b8675e4761ef0e1ef461e91c27"} Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.659387 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ff0940924d3dce4581d11109e9a177ee1016e2b8675e4761ef0e1ef461e91c27" Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.661527 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-968s2" event={"ID":"9d757ee4-2d14-4644-9323-955537ee639e","Type":"ContainerDied","Data":"b4d85e4d184e4998db46db0b1205a979b16c3d967c5396ba0647aaf389585711"} Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.661523 5039 generic.go:334] "Generic (PLEG): container finished" podID="9d757ee4-2d14-4644-9323-955537ee639e" containerID="b4d85e4d184e4998db46db0b1205a979b16c3d967c5396ba0647aaf389585711" exitCode=0 Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.834916 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.835485 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="40c70f05-786b-43b9-9bb5-87b6f7907cc1" containerName="nova-api-log" containerID="cri-o://8a5598eb8354cf4b2962d73b351825d07554d321089648b8edee6aa50e8516c8" gracePeriod=30 Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.835631 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="40c70f05-786b-43b9-9bb5-87b6f7907cc1" containerName="nova-api-api" containerID="cri-o://3e8ad326f0e5aa69207aebcad58d3d771092003f9982ff1ef4cf628a7d8d46a7" gracePeriod=30 Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.852494 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.852725 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="769a2230-2e35-4d97-94a2-5c9cfcf0e054" containerName="nova-scheduler-scheduler" containerID="cri-o://8e3dfc9d1f8f4bbbf46e789329a83cd07a146c178c8603dc743670c0b0926b7a" gracePeriod=30 Nov 24 13:42:50 crc kubenswrapper[5039]: I1124 13:42:50.914948 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 13:42:51 crc kubenswrapper[5039]: I1124 13:42:51.382084 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 24 13:42:51 crc kubenswrapper[5039]: I1124 13:42:51.671930 5039 generic.go:334] "Generic (PLEG): container finished" podID="40c70f05-786b-43b9-9bb5-87b6f7907cc1" containerID="8a5598eb8354cf4b2962d73b351825d07554d321089648b8edee6aa50e8516c8" exitCode=143 Nov 24 13:42:51 crc kubenswrapper[5039]: I1124 13:42:51.672009 5039 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"40c70f05-786b-43b9-9bb5-87b6f7907cc1","Type":"ContainerDied","Data":"8a5598eb8354cf4b2962d73b351825d07554d321089648b8edee6aa50e8516c8"} Nov 24 13:42:51 crc kubenswrapper[5039]: I1124 13:42:51.672327 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380" containerName="nova-metadata-log" containerID="cri-o://ad6b98286d7cdb8262fa01e00f242c8aa51cb716832ad4b510cb5ed2850f49e7" gracePeriod=30 Nov 24 13:42:51 crc kubenswrapper[5039]: I1124 13:42:51.672778 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380" containerName="nova-metadata-metadata" containerID="cri-o://c493f3f049f78f6b76d82d248365f32ed985ec6374926f81fbe4bc67944edc88" gracePeriod=30 Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.228749 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-968s2" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.402372 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d757ee4-2d14-4644-9323-955537ee639e-scripts\") pod \"9d757ee4-2d14-4644-9323-955537ee639e\" (UID: \"9d757ee4-2d14-4644-9323-955537ee639e\") " Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.402413 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d757ee4-2d14-4644-9323-955537ee639e-combined-ca-bundle\") pod \"9d757ee4-2d14-4644-9323-955537ee639e\" (UID: \"9d757ee4-2d14-4644-9323-955537ee639e\") " Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.402799 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d757ee4-2d14-4644-9323-955537ee639e-config-data\") pod \"9d757ee4-2d14-4644-9323-955537ee639e\" (UID: \"9d757ee4-2d14-4644-9323-955537ee639e\") " Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.402892 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-khtgz\" (UniqueName: \"kubernetes.io/projected/9d757ee4-2d14-4644-9323-955537ee639e-kube-api-access-khtgz\") pod \"9d757ee4-2d14-4644-9323-955537ee639e\" (UID: \"9d757ee4-2d14-4644-9323-955537ee639e\") " Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.408315 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d757ee4-2d14-4644-9323-955537ee639e-scripts" (OuterVolumeSpecName: "scripts") pod "9d757ee4-2d14-4644-9323-955537ee639e" (UID: "9d757ee4-2d14-4644-9323-955537ee639e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.408939 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d757ee4-2d14-4644-9323-955537ee639e-kube-api-access-khtgz" (OuterVolumeSpecName: "kube-api-access-khtgz") pod "9d757ee4-2d14-4644-9323-955537ee639e" (UID: "9d757ee4-2d14-4644-9323-955537ee639e"). InnerVolumeSpecName "kube-api-access-khtgz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.442942 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d757ee4-2d14-4644-9323-955537ee639e-config-data" (OuterVolumeSpecName: "config-data") pod "9d757ee4-2d14-4644-9323-955537ee639e" (UID: "9d757ee4-2d14-4644-9323-955537ee639e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.445882 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d757ee4-2d14-4644-9323-955537ee639e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9d757ee4-2d14-4644-9323-955537ee639e" (UID: "9d757ee4-2d14-4644-9323-955537ee639e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.506712 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-khtgz\" (UniqueName: \"kubernetes.io/projected/9d757ee4-2d14-4644-9323-955537ee639e-kube-api-access-khtgz\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.506837 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d757ee4-2d14-4644-9323-955537ee639e-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.506898 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d757ee4-2d14-4644-9323-955537ee639e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.506971 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d757ee4-2d14-4644-9323-955537ee639e-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.517662 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.687336 5039 generic.go:334] "Generic (PLEG): container finished" podID="769a2230-2e35-4d97-94a2-5c9cfcf0e054" containerID="8e3dfc9d1f8f4bbbf46e789329a83cd07a146c178c8603dc743670c0b0926b7a" exitCode=0 Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.688351 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"769a2230-2e35-4d97-94a2-5c9cfcf0e054","Type":"ContainerDied","Data":"8e3dfc9d1f8f4bbbf46e789329a83cd07a146c178c8603dc743670c0b0926b7a"} Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.690592 5039 generic.go:334] "Generic (PLEG): container finished" podID="2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380" containerID="c493f3f049f78f6b76d82d248365f32ed985ec6374926f81fbe4bc67944edc88" exitCode=0 Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.690744 5039 generic.go:334] "Generic (PLEG): container finished" podID="2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380" containerID="ad6b98286d7cdb8262fa01e00f242c8aa51cb716832ad4b510cb5ed2850f49e7" exitCode=143 Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.690679 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.690671 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380","Type":"ContainerDied","Data":"c493f3f049f78f6b76d82d248365f32ed985ec6374926f81fbe4bc67944edc88"} Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.691083 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380","Type":"ContainerDied","Data":"ad6b98286d7cdb8262fa01e00f242c8aa51cb716832ad4b510cb5ed2850f49e7"} Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.691120 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380","Type":"ContainerDied","Data":"d80ccbcfa6e80d45a2e2c3365478aa053ba6dbd556a3500c5760aff6180c9725"} Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.691141 5039 scope.go:117] "RemoveContainer" containerID="c493f3f049f78f6b76d82d248365f32ed985ec6374926f81fbe4bc67944edc88" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.693711 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-968s2" event={"ID":"9d757ee4-2d14-4644-9323-955537ee639e","Type":"ContainerDied","Data":"32a7b0522186879cc19cf9f74893da13a841469614cfc5909198e542c31beb60"} Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.693747 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="32a7b0522186879cc19cf9f74893da13a841469614cfc5909198e542c31beb60" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.693824 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-968s2" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.710576 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-combined-ca-bundle\") pod \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\" (UID: \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\") " Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.712221 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-nova-metadata-tls-certs\") pod \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\" (UID: \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\") " Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.712445 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-logs\") pod \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\" (UID: \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\") " Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.712748 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xkmb4\" (UniqueName: \"kubernetes.io/projected/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-kube-api-access-xkmb4\") pod \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\" (UID: \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\") " Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.712963 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-config-data\") pod \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\" (UID: \"2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380\") " Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.713926 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-logs" (OuterVolumeSpecName: "logs") pod "2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380" (UID: "2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.721424 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-kube-api-access-xkmb4" (OuterVolumeSpecName: "kube-api-access-xkmb4") pod "2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380" (UID: "2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380"). InnerVolumeSpecName "kube-api-access-xkmb4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.742216 5039 scope.go:117] "RemoveContainer" containerID="ad6b98286d7cdb8262fa01e00f242c8aa51cb716832ad4b510cb5ed2850f49e7" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.765701 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-config-data" (OuterVolumeSpecName: "config-data") pod "2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380" (UID: "2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.775747 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 24 13:42:52 crc kubenswrapper[5039]: E1124 13:42:52.776327 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bbaa038-035d-4e44-ace2-4ac374ccc28a" containerName="aodh-db-sync" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.776345 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bbaa038-035d-4e44-ace2-4ac374ccc28a" containerName="aodh-db-sync" Nov 24 13:42:52 crc kubenswrapper[5039]: E1124 13:42:52.776376 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5558fd75-638f-4d1e-b0d8-e8e071471415" containerName="nova-manage" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.776384 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="5558fd75-638f-4d1e-b0d8-e8e071471415" containerName="nova-manage" Nov 24 13:42:52 crc kubenswrapper[5039]: E1124 13:42:52.776401 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d757ee4-2d14-4644-9323-955537ee639e" containerName="nova-cell1-conductor-db-sync" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.776409 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d757ee4-2d14-4644-9323-955537ee639e" containerName="nova-cell1-conductor-db-sync" Nov 24 13:42:52 crc kubenswrapper[5039]: E1124 13:42:52.776424 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380" containerName="nova-metadata-metadata" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.776430 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380" containerName="nova-metadata-metadata" Nov 24 13:42:52 crc kubenswrapper[5039]: E1124 13:42:52.776457 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380" containerName="nova-metadata-log" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.776464 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380" containerName="nova-metadata-log" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.776717 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380" containerName="nova-metadata-log" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.776729 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="5558fd75-638f-4d1e-b0d8-e8e071471415" containerName="nova-manage" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.776746 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bbaa038-035d-4e44-ace2-4ac374ccc28a" containerName="aodh-db-sync" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.776766 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d757ee4-2d14-4644-9323-955537ee639e" containerName="nova-cell1-conductor-db-sync" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.776779 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380" containerName="nova-metadata-metadata" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.777705 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.781112 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.785836 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380" (UID: "2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.789282 5039 scope.go:117] "RemoveContainer" containerID="c493f3f049f78f6b76d82d248365f32ed985ec6374926f81fbe4bc67944edc88" Nov 24 13:42:52 crc kubenswrapper[5039]: E1124 13:42:52.790340 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c493f3f049f78f6b76d82d248365f32ed985ec6374926f81fbe4bc67944edc88\": container with ID starting with c493f3f049f78f6b76d82d248365f32ed985ec6374926f81fbe4bc67944edc88 not found: ID does not exist" containerID="c493f3f049f78f6b76d82d248365f32ed985ec6374926f81fbe4bc67944edc88" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.790446 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c493f3f049f78f6b76d82d248365f32ed985ec6374926f81fbe4bc67944edc88"} err="failed to get container status \"c493f3f049f78f6b76d82d248365f32ed985ec6374926f81fbe4bc67944edc88\": rpc error: code = NotFound desc = could not find container \"c493f3f049f78f6b76d82d248365f32ed985ec6374926f81fbe4bc67944edc88\": container with ID starting with c493f3f049f78f6b76d82d248365f32ed985ec6374926f81fbe4bc67944edc88 not found: ID does not exist" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.790606 5039 scope.go:117] "RemoveContainer" containerID="ad6b98286d7cdb8262fa01e00f242c8aa51cb716832ad4b510cb5ed2850f49e7" Nov 24 13:42:52 crc kubenswrapper[5039]: E1124 13:42:52.791977 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad6b98286d7cdb8262fa01e00f242c8aa51cb716832ad4b510cb5ed2850f49e7\": container with ID starting with ad6b98286d7cdb8262fa01e00f242c8aa51cb716832ad4b510cb5ed2850f49e7 not found: ID does not exist" containerID="ad6b98286d7cdb8262fa01e00f242c8aa51cb716832ad4b510cb5ed2850f49e7" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.792064 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad6b98286d7cdb8262fa01e00f242c8aa51cb716832ad4b510cb5ed2850f49e7"} err="failed to get container status \"ad6b98286d7cdb8262fa01e00f242c8aa51cb716832ad4b510cb5ed2850f49e7\": rpc error: code = NotFound desc = could not find container \"ad6b98286d7cdb8262fa01e00f242c8aa51cb716832ad4b510cb5ed2850f49e7\": container with ID starting with ad6b98286d7cdb8262fa01e00f242c8aa51cb716832ad4b510cb5ed2850f49e7 not found: ID does not exist" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.792158 5039 scope.go:117] "RemoveContainer" containerID="c493f3f049f78f6b76d82d248365f32ed985ec6374926f81fbe4bc67944edc88" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.792996 5039 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"c493f3f049f78f6b76d82d248365f32ed985ec6374926f81fbe4bc67944edc88"} err="failed to get container status \"c493f3f049f78f6b76d82d248365f32ed985ec6374926f81fbe4bc67944edc88\": rpc error: code = NotFound desc = could not find container \"c493f3f049f78f6b76d82d248365f32ed985ec6374926f81fbe4bc67944edc88\": container with ID starting with c493f3f049f78f6b76d82d248365f32ed985ec6374926f81fbe4bc67944edc88 not found: ID does not exist" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.793111 5039 scope.go:117] "RemoveContainer" containerID="ad6b98286d7cdb8262fa01e00f242c8aa51cb716832ad4b510cb5ed2850f49e7" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.793621 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad6b98286d7cdb8262fa01e00f242c8aa51cb716832ad4b510cb5ed2850f49e7"} err="failed to get container status \"ad6b98286d7cdb8262fa01e00f242c8aa51cb716832ad4b510cb5ed2850f49e7\": rpc error: code = NotFound desc = could not find container \"ad6b98286d7cdb8262fa01e00f242c8aa51cb716832ad4b510cb5ed2850f49e7\": container with ID starting with ad6b98286d7cdb8262fa01e00f242c8aa51cb716832ad4b510cb5ed2850f49e7 not found: ID does not exist" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.793965 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.815191 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5894e63c-79c2-42a0-bc65-95f1a69a1525-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"5894e63c-79c2-42a0-bc65-95f1a69a1525\") " pod="openstack/nova-cell1-conductor-0" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.815535 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5894e63c-79c2-42a0-bc65-95f1a69a1525-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"5894e63c-79c2-42a0-bc65-95f1a69a1525\") " pod="openstack/nova-cell1-conductor-0" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.815678 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wg54\" (UniqueName: \"kubernetes.io/projected/5894e63c-79c2-42a0-bc65-95f1a69a1525-kube-api-access-9wg54\") pod \"nova-cell1-conductor-0\" (UID: \"5894e63c-79c2-42a0-bc65-95f1a69a1525\") " pod="openstack/nova-cell1-conductor-0" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.815872 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xkmb4\" (UniqueName: \"kubernetes.io/projected/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-kube-api-access-xkmb4\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.815945 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.816040 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.816114 5039 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-logs\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.838758 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380" (UID: "2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.917983 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5894e63c-79c2-42a0-bc65-95f1a69a1525-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"5894e63c-79c2-42a0-bc65-95f1a69a1525\") " pod="openstack/nova-cell1-conductor-0" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.918051 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5894e63c-79c2-42a0-bc65-95f1a69a1525-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"5894e63c-79c2-42a0-bc65-95f1a69a1525\") " pod="openstack/nova-cell1-conductor-0" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.918108 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wg54\" (UniqueName: \"kubernetes.io/projected/5894e63c-79c2-42a0-bc65-95f1a69a1525-kube-api-access-9wg54\") pod \"nova-cell1-conductor-0\" (UID: \"5894e63c-79c2-42a0-bc65-95f1a69a1525\") " pod="openstack/nova-cell1-conductor-0" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.918307 5039 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.922395 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5894e63c-79c2-42a0-bc65-95f1a69a1525-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"5894e63c-79c2-42a0-bc65-95f1a69a1525\") " pod="openstack/nova-cell1-conductor-0" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.935174 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wg54\" (UniqueName: \"kubernetes.io/projected/5894e63c-79c2-42a0-bc65-95f1a69a1525-kube-api-access-9wg54\") pod \"nova-cell1-conductor-0\" (UID: \"5894e63c-79c2-42a0-bc65-95f1a69a1525\") " pod="openstack/nova-cell1-conductor-0" Nov 24 13:42:52 crc kubenswrapper[5039]: I1124 13:42:52.950587 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5894e63c-79c2-42a0-bc65-95f1a69a1525-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"5894e63c-79c2-42a0-bc65-95f1a69a1525\") " pod="openstack/nova-cell1-conductor-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.027395 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.042929 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.066360 5039 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/nova-metadata-0"] Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.068232 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.072938 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.074836 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.081368 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.106900 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.215017 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.235071 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\") " pod="openstack/nova-metadata-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.235266 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\") " pod="openstack/nova-metadata-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.235350 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-logs\") pod \"nova-metadata-0\" (UID: \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\") " pod="openstack/nova-metadata-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.235377 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-config-data\") pod \"nova-metadata-0\" (UID: \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\") " pod="openstack/nova-metadata-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.235401 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hssm9\" (UniqueName: \"kubernetes.io/projected/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-kube-api-access-hssm9\") pod \"nova-metadata-0\" (UID: \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\") " pod="openstack/nova-metadata-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.336481 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/769a2230-2e35-4d97-94a2-5c9cfcf0e054-combined-ca-bundle\") pod \"769a2230-2e35-4d97-94a2-5c9cfcf0e054\" (UID: \"769a2230-2e35-4d97-94a2-5c9cfcf0e054\") " Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.336624 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hcw2p\" (UniqueName: 
\"kubernetes.io/projected/769a2230-2e35-4d97-94a2-5c9cfcf0e054-kube-api-access-hcw2p\") pod \"769a2230-2e35-4d97-94a2-5c9cfcf0e054\" (UID: \"769a2230-2e35-4d97-94a2-5c9cfcf0e054\") " Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.337142 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/769a2230-2e35-4d97-94a2-5c9cfcf0e054-config-data\") pod \"769a2230-2e35-4d97-94a2-5c9cfcf0e054\" (UID: \"769a2230-2e35-4d97-94a2-5c9cfcf0e054\") " Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.337310 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\") " pod="openstack/nova-metadata-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.337408 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-logs\") pod \"nova-metadata-0\" (UID: \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\") " pod="openstack/nova-metadata-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.337433 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-config-data\") pod \"nova-metadata-0\" (UID: \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\") " pod="openstack/nova-metadata-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.337457 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hssm9\" (UniqueName: \"kubernetes.io/projected/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-kube-api-access-hssm9\") pod \"nova-metadata-0\" (UID: \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\") " pod="openstack/nova-metadata-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.337780 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\") " pod="openstack/nova-metadata-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.338743 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-logs\") pod \"nova-metadata-0\" (UID: \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\") " pod="openstack/nova-metadata-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.341796 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/769a2230-2e35-4d97-94a2-5c9cfcf0e054-kube-api-access-hcw2p" (OuterVolumeSpecName: "kube-api-access-hcw2p") pod "769a2230-2e35-4d97-94a2-5c9cfcf0e054" (UID: "769a2230-2e35-4d97-94a2-5c9cfcf0e054"). InnerVolumeSpecName "kube-api-access-hcw2p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.341965 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-config-data\") pod \"nova-metadata-0\" (UID: \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\") " pod="openstack/nova-metadata-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.342308 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\") " pod="openstack/nova-metadata-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.343334 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\") " pod="openstack/nova-metadata-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.355306 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hssm9\" (UniqueName: \"kubernetes.io/projected/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-kube-api-access-hssm9\") pod \"nova-metadata-0\" (UID: \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\") " pod="openstack/nova-metadata-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.373488 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/769a2230-2e35-4d97-94a2-5c9cfcf0e054-config-data" (OuterVolumeSpecName: "config-data") pod "769a2230-2e35-4d97-94a2-5c9cfcf0e054" (UID: "769a2230-2e35-4d97-94a2-5c9cfcf0e054"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.382175 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/769a2230-2e35-4d97-94a2-5c9cfcf0e054-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "769a2230-2e35-4d97-94a2-5c9cfcf0e054" (UID: "769a2230-2e35-4d97-94a2-5c9cfcf0e054"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.439629 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/769a2230-2e35-4d97-94a2-5c9cfcf0e054-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.439667 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hcw2p\" (UniqueName: \"kubernetes.io/projected/769a2230-2e35-4d97-94a2-5c9cfcf0e054-kube-api-access-hcw2p\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.439683 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/769a2230-2e35-4d97-94a2-5c9cfcf0e054-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.529610 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.593767 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.716099 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"769a2230-2e35-4d97-94a2-5c9cfcf0e054","Type":"ContainerDied","Data":"9815c02048f35b241f9fea9fb8e9be5c63e7218964e69c72cd1cc4910f768d64"} Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.716121 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.716591 5039 scope.go:117] "RemoveContainer" containerID="8e3dfc9d1f8f4bbbf46e789329a83cd07a146c178c8603dc743670c0b0926b7a" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.723471 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"5894e63c-79c2-42a0-bc65-95f1a69a1525","Type":"ContainerStarted","Data":"3d465068564593590a74585a840aaf6a11cbbe705ff31ea724b5871f667e79d3"} Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.784909 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.799971 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.815285 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 13:42:53 crc kubenswrapper[5039]: E1124 13:42:53.816176 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="769a2230-2e35-4d97-94a2-5c9cfcf0e054" containerName="nova-scheduler-scheduler" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.816195 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="769a2230-2e35-4d97-94a2-5c9cfcf0e054" containerName="nova-scheduler-scheduler" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.816440 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="769a2230-2e35-4d97-94a2-5c9cfcf0e054" containerName="nova-scheduler-scheduler" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.817267 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.819790 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.825985 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.947697 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zv92t\" (UniqueName: \"kubernetes.io/projected/366e8969-e52e-4bdf-8171-18d9dedff03c-kube-api-access-zv92t\") pod \"nova-scheduler-0\" (UID: \"366e8969-e52e-4bdf-8171-18d9dedff03c\") " pod="openstack/nova-scheduler-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.948475 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/366e8969-e52e-4bdf-8171-18d9dedff03c-config-data\") pod \"nova-scheduler-0\" (UID: \"366e8969-e52e-4bdf-8171-18d9dedff03c\") " pod="openstack/nova-scheduler-0" Nov 24 13:42:53 crc kubenswrapper[5039]: I1124 13:42:53.948532 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/366e8969-e52e-4bdf-8171-18d9dedff03c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"366e8969-e52e-4bdf-8171-18d9dedff03c\") " pod="openstack/nova-scheduler-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.029206 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.039059 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.041627 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.052137 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.052381 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-k6pb7" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.052433 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zv92t\" (UniqueName: \"kubernetes.io/projected/366e8969-e52e-4bdf-8171-18d9dedff03c-kube-api-access-zv92t\") pod \"nova-scheduler-0\" (UID: \"366e8969-e52e-4bdf-8171-18d9dedff03c\") " pod="openstack/nova-scheduler-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.052528 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/366e8969-e52e-4bdf-8171-18d9dedff03c-config-data\") pod \"nova-scheduler-0\" (UID: \"366e8969-e52e-4bdf-8171-18d9dedff03c\") " pod="openstack/nova-scheduler-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.052551 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/366e8969-e52e-4bdf-8171-18d9dedff03c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"366e8969-e52e-4bdf-8171-18d9dedff03c\") " pod="openstack/nova-scheduler-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.052631 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.058209 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/366e8969-e52e-4bdf-8171-18d9dedff03c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"366e8969-e52e-4bdf-8171-18d9dedff03c\") " pod="openstack/nova-scheduler-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.059278 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/366e8969-e52e-4bdf-8171-18d9dedff03c-config-data\") pod \"nova-scheduler-0\" (UID: \"366e8969-e52e-4bdf-8171-18d9dedff03c\") " pod="openstack/nova-scheduler-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.071163 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.081053 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zv92t\" (UniqueName: \"kubernetes.io/projected/366e8969-e52e-4bdf-8171-18d9dedff03c-kube-api-access-zv92t\") pod \"nova-scheduler-0\" (UID: \"366e8969-e52e-4bdf-8171-18d9dedff03c\") " pod="openstack/nova-scheduler-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.139464 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.157375 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0168214d-ac93-41c7-babc-e048a74fca46-config-data\") pod \"aodh-0\" (UID: \"0168214d-ac93-41c7-babc-e048a74fca46\") " pod="openstack/aodh-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.157528 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0168214d-ac93-41c7-babc-e048a74fca46-combined-ca-bundle\") pod \"aodh-0\" (UID: \"0168214d-ac93-41c7-babc-e048a74fca46\") " pod="openstack/aodh-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.157631 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0168214d-ac93-41c7-babc-e048a74fca46-scripts\") pod \"aodh-0\" (UID: \"0168214d-ac93-41c7-babc-e048a74fca46\") " pod="openstack/aodh-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.157858 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mrt6\" (UniqueName: \"kubernetes.io/projected/0168214d-ac93-41c7-babc-e048a74fca46-kube-api-access-8mrt6\") pod \"aodh-0\" (UID: \"0168214d-ac93-41c7-babc-e048a74fca46\") " pod="openstack/aodh-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.260928 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mrt6\" (UniqueName: \"kubernetes.io/projected/0168214d-ac93-41c7-babc-e048a74fca46-kube-api-access-8mrt6\") pod \"aodh-0\" (UID: \"0168214d-ac93-41c7-babc-e048a74fca46\") " pod="openstack/aodh-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.261014 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0168214d-ac93-41c7-babc-e048a74fca46-config-data\") pod \"aodh-0\" (UID: \"0168214d-ac93-41c7-babc-e048a74fca46\") " pod="openstack/aodh-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.261068 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0168214d-ac93-41c7-babc-e048a74fca46-combined-ca-bundle\") pod \"aodh-0\" (UID: \"0168214d-ac93-41c7-babc-e048a74fca46\") " pod="openstack/aodh-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.261111 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0168214d-ac93-41c7-babc-e048a74fca46-scripts\") pod \"aodh-0\" (UID: \"0168214d-ac93-41c7-babc-e048a74fca46\") " pod="openstack/aodh-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.272704 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0168214d-ac93-41c7-babc-e048a74fca46-combined-ca-bundle\") pod \"aodh-0\" (UID: \"0168214d-ac93-41c7-babc-e048a74fca46\") " pod="openstack/aodh-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.275177 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0168214d-ac93-41c7-babc-e048a74fca46-config-data\") pod \"aodh-0\" (UID: \"0168214d-ac93-41c7-babc-e048a74fca46\") " pod="openstack/aodh-0" Nov 
24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.287865 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0168214d-ac93-41c7-babc-e048a74fca46-scripts\") pod \"aodh-0\" (UID: \"0168214d-ac93-41c7-babc-e048a74fca46\") " pod="openstack/aodh-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.288300 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mrt6\" (UniqueName: \"kubernetes.io/projected/0168214d-ac93-41c7-babc-e048a74fca46-kube-api-access-8mrt6\") pod \"aodh-0\" (UID: \"0168214d-ac93-41c7-babc-e048a74fca46\") " pod="openstack/aodh-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.453197 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380" path="/var/lib/kubelet/pods/2859d6eb-5fec-4b3a-a6b0-abfe7cb6c380/volumes" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.454320 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="769a2230-2e35-4d97-94a2-5c9cfcf0e054" path="/var/lib/kubelet/pods/769a2230-2e35-4d97-94a2-5c9cfcf0e054/volumes" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.545077 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.569257 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.669623 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wtwsl\" (UniqueName: \"kubernetes.io/projected/40c70f05-786b-43b9-9bb5-87b6f7907cc1-kube-api-access-wtwsl\") pod \"40c70f05-786b-43b9-9bb5-87b6f7907cc1\" (UID: \"40c70f05-786b-43b9-9bb5-87b6f7907cc1\") " Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.669967 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40c70f05-786b-43b9-9bb5-87b6f7907cc1-logs\") pod \"40c70f05-786b-43b9-9bb5-87b6f7907cc1\" (UID: \"40c70f05-786b-43b9-9bb5-87b6f7907cc1\") " Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.670020 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40c70f05-786b-43b9-9bb5-87b6f7907cc1-config-data\") pod \"40c70f05-786b-43b9-9bb5-87b6f7907cc1\" (UID: \"40c70f05-786b-43b9-9bb5-87b6f7907cc1\") " Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.670105 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40c70f05-786b-43b9-9bb5-87b6f7907cc1-combined-ca-bundle\") pod \"40c70f05-786b-43b9-9bb5-87b6f7907cc1\" (UID: \"40c70f05-786b-43b9-9bb5-87b6f7907cc1\") " Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.671088 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40c70f05-786b-43b9-9bb5-87b6f7907cc1-logs" (OuterVolumeSpecName: "logs") pod "40c70f05-786b-43b9-9bb5-87b6f7907cc1" (UID: "40c70f05-786b-43b9-9bb5-87b6f7907cc1"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:42:54 crc kubenswrapper[5039]: I1124 13:42:54.684540 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40c70f05-786b-43b9-9bb5-87b6f7907cc1-kube-api-access-wtwsl" (OuterVolumeSpecName: "kube-api-access-wtwsl") pod "40c70f05-786b-43b9-9bb5-87b6f7907cc1" (UID: "40c70f05-786b-43b9-9bb5-87b6f7907cc1"). InnerVolumeSpecName "kube-api-access-wtwsl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.745353 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40c70f05-786b-43b9-9bb5-87b6f7907cc1-config-data" (OuterVolumeSpecName: "config-data") pod "40c70f05-786b-43b9-9bb5-87b6f7907cc1" (UID: "40c70f05-786b-43b9-9bb5-87b6f7907cc1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.746878 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b","Type":"ContainerStarted","Data":"5d79751868aa423bc2ee3b734b640e180feacd89ffec45885adaabc31ee4bee7"} Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.746922 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b","Type":"ContainerStarted","Data":"b65e4237f91e86058d39ca99008d8feeed9c930dc27b068da5efd6d99433305e"} Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.759385 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40c70f05-786b-43b9-9bb5-87b6f7907cc1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "40c70f05-786b-43b9-9bb5-87b6f7907cc1" (UID: "40c70f05-786b-43b9-9bb5-87b6f7907cc1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.772552 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wtwsl\" (UniqueName: \"kubernetes.io/projected/40c70f05-786b-43b9-9bb5-87b6f7907cc1-kube-api-access-wtwsl\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.772610 5039 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40c70f05-786b-43b9-9bb5-87b6f7907cc1-logs\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.772623 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40c70f05-786b-43b9-9bb5-87b6f7907cc1-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.772634 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40c70f05-786b-43b9-9bb5-87b6f7907cc1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.778926 5039 generic.go:334] "Generic (PLEG): container finished" podID="40c70f05-786b-43b9-9bb5-87b6f7907cc1" containerID="3e8ad326f0e5aa69207aebcad58d3d771092003f9982ff1ef4cf628a7d8d46a7" exitCode=0 Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.778991 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"40c70f05-786b-43b9-9bb5-87b6f7907cc1","Type":"ContainerDied","Data":"3e8ad326f0e5aa69207aebcad58d3d771092003f9982ff1ef4cf628a7d8d46a7"} Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.779022 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"40c70f05-786b-43b9-9bb5-87b6f7907cc1","Type":"ContainerDied","Data":"c081b95bd24c19ea3ffac6d70980e28755994ecbf10c0cd51c96bfbad2d3eec9"} Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.779044 5039 scope.go:117] "RemoveContainer" containerID="3e8ad326f0e5aa69207aebcad58d3d771092003f9982ff1ef4cf628a7d8d46a7" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.779208 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.793032 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"5894e63c-79c2-42a0-bc65-95f1a69a1525","Type":"ContainerStarted","Data":"766c364a9cdd43a6b07f789bda38509cff38a3183dbc6f1f44af09812b52cbb5"} Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.793360 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.822845 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.822813171 podStartE2EDuration="2.822813171s" podCreationTimestamp="2025-11-24 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:42:54.811724468 +0000 UTC m=+1487.250848998" watchObservedRunningTime="2025-11-24 13:42:54.822813171 +0000 UTC m=+1487.261937671" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.827004 5039 scope.go:117] "RemoveContainer" containerID="8a5598eb8354cf4b2962d73b351825d07554d321089648b8edee6aa50e8516c8" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.871824 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.893751 5039 scope.go:117] "RemoveContainer" containerID="3e8ad326f0e5aa69207aebcad58d3d771092003f9982ff1ef4cf628a7d8d46a7" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.900619 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 24 13:42:55 crc kubenswrapper[5039]: E1124 13:42:54.900902 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e8ad326f0e5aa69207aebcad58d3d771092003f9982ff1ef4cf628a7d8d46a7\": container with ID starting with 3e8ad326f0e5aa69207aebcad58d3d771092003f9982ff1ef4cf628a7d8d46a7 not found: ID does not exist" containerID="3e8ad326f0e5aa69207aebcad58d3d771092003f9982ff1ef4cf628a7d8d46a7" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.900938 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e8ad326f0e5aa69207aebcad58d3d771092003f9982ff1ef4cf628a7d8d46a7"} err="failed to get container status \"3e8ad326f0e5aa69207aebcad58d3d771092003f9982ff1ef4cf628a7d8d46a7\": rpc error: code = NotFound desc = could not find container \"3e8ad326f0e5aa69207aebcad58d3d771092003f9982ff1ef4cf628a7d8d46a7\": container with ID starting with 3e8ad326f0e5aa69207aebcad58d3d771092003f9982ff1ef4cf628a7d8d46a7 not found: ID does not exist" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.900962 5039 scope.go:117] "RemoveContainer" containerID="8a5598eb8354cf4b2962d73b351825d07554d321089648b8edee6aa50e8516c8" Nov 24 13:42:55 crc kubenswrapper[5039]: E1124 13:42:54.901406 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a5598eb8354cf4b2962d73b351825d07554d321089648b8edee6aa50e8516c8\": container with ID starting with 8a5598eb8354cf4b2962d73b351825d07554d321089648b8edee6aa50e8516c8 not found: ID does not exist" containerID="8a5598eb8354cf4b2962d73b351825d07554d321089648b8edee6aa50e8516c8" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.901449 5039 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a5598eb8354cf4b2962d73b351825d07554d321089648b8edee6aa50e8516c8"} err="failed to get container status \"8a5598eb8354cf4b2962d73b351825d07554d321089648b8edee6aa50e8516c8\": rpc error: code = NotFound desc = could not find container \"8a5598eb8354cf4b2962d73b351825d07554d321089648b8edee6aa50e8516c8\": container with ID starting with 8a5598eb8354cf4b2962d73b351825d07554d321089648b8edee6aa50e8516c8 not found: ID does not exist" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.911553 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 24 13:42:55 crc kubenswrapper[5039]: E1124 13:42:54.912057 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40c70f05-786b-43b9-9bb5-87b6f7907cc1" containerName="nova-api-api" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.912072 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="40c70f05-786b-43b9-9bb5-87b6f7907cc1" containerName="nova-api-api" Nov 24 13:42:55 crc kubenswrapper[5039]: E1124 13:42:54.912102 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40c70f05-786b-43b9-9bb5-87b6f7907cc1" containerName="nova-api-log" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.912110 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="40c70f05-786b-43b9-9bb5-87b6f7907cc1" containerName="nova-api-log" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.912379 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="40c70f05-786b-43b9-9bb5-87b6f7907cc1" containerName="nova-api-log" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.912401 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="40c70f05-786b-43b9-9bb5-87b6f7907cc1" containerName="nova-api-api" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.913748 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.918084 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.921666 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:54.938087 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:55.082106 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fcwbn\" (UniqueName: \"kubernetes.io/projected/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-kube-api-access-fcwbn\") pod \"nova-api-0\" (UID: \"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443\") " pod="openstack/nova-api-0" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:55.082572 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-config-data\") pod \"nova-api-0\" (UID: \"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443\") " pod="openstack/nova-api-0" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:55.082837 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443\") " pod="openstack/nova-api-0" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:55.082879 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-logs\") pod \"nova-api-0\" (UID: \"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443\") " pod="openstack/nova-api-0" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:55.185754 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-config-data\") pod \"nova-api-0\" (UID: \"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443\") " pod="openstack/nova-api-0" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:55.186029 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443\") " pod="openstack/nova-api-0" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:55.186137 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-logs\") pod \"nova-api-0\" (UID: \"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443\") " pod="openstack/nova-api-0" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:55.186184 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fcwbn\" (UniqueName: \"kubernetes.io/projected/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-kube-api-access-fcwbn\") pod \"nova-api-0\" (UID: \"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443\") " pod="openstack/nova-api-0" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:55.186512 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-logs\") pod \"nova-api-0\" (UID: \"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443\") " pod="openstack/nova-api-0" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:55.191530 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-config-data\") pod \"nova-api-0\" (UID: \"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443\") " pod="openstack/nova-api-0" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:55.191735 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443\") " pod="openstack/nova-api-0" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:55.211033 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fcwbn\" (UniqueName: \"kubernetes.io/projected/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-kube-api-access-fcwbn\") pod \"nova-api-0\" (UID: \"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443\") " pod="openstack/nova-api-0" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:55.369284 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:55.811642 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b","Type":"ContainerStarted","Data":"62bf54e687cfd6eea3f1d770802c144f1876aee8edafc03614fd2e61aa042c70"} Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:55.819058 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"366e8969-e52e-4bdf-8171-18d9dedff03c","Type":"ContainerStarted","Data":"7841fe8d5a386463852ff5a0699752832a5d497deff78e1603e25ba99a718fbc"} Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:55.819144 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"366e8969-e52e-4bdf-8171-18d9dedff03c","Type":"ContainerStarted","Data":"f8744ca9780fd4ed4b303020159546d487c69bfc639f356160fe49b304c37eaf"} Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:55.836019 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.835980256 podStartE2EDuration="2.835980256s" podCreationTimestamp="2025-11-24 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:42:55.834124559 +0000 UTC m=+1488.273249059" watchObservedRunningTime="2025-11-24 13:42:55.835980256 +0000 UTC m=+1488.275104756" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:55.872665 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.872639629 podStartE2EDuration="2.872639629s" podCreationTimestamp="2025-11-24 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:42:55.851430596 +0000 UTC m=+1488.290555116" watchObservedRunningTime="2025-11-24 13:42:55.872639629 +0000 UTC m=+1488.311764129" Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:55.908603 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 
Nov 24 13:42:55 crc kubenswrapper[5039]: W1124 13:42:55.928651 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0168214d_ac93_41c7_babc_e048a74fca46.slice/crio-326e269a5fc812f75e0b480e96e93e3705be740ce6e3c2e02a0726131e4b460c WatchSource:0}: Error finding container 326e269a5fc812f75e0b480e96e93e3705be740ce6e3c2e02a0726131e4b460c: Status 404 returned error can't find the container with id 326e269a5fc812f75e0b480e96e93e3705be740ce6e3c2e02a0726131e4b460c
Nov 24 13:42:55 crc kubenswrapper[5039]: I1124 13:42:55.933300 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Nov 24 13:42:56 crc kubenswrapper[5039]: I1124 13:42:56.033269 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 24 13:42:56 crc kubenswrapper[5039]: I1124 13:42:56.033637 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" containerName="sg-core" containerID="cri-o://1ab5882346438ed134b9131b22c7c567a8ae07ff56e2310bd8b6145e8b911fb4" gracePeriod=30
Nov 24 13:42:56 crc kubenswrapper[5039]: I1124 13:42:56.033656 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" containerName="proxy-httpd" containerID="cri-o://e658ae07e319c31fe236230f6a0c9d415d4023e9f542e782897b69e1791ea092" gracePeriod=30
Nov 24 13:42:56 crc kubenswrapper[5039]: I1124 13:42:56.033713 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" containerName="ceilometer-notification-agent" containerID="cri-o://c2e6ba544a5a6ed1b9cffc2fb6bab004cf3517825a2d7d6738deb68cda50217f" gracePeriod=30
Nov 24 13:42:56 crc kubenswrapper[5039]: I1124 13:42:56.034387 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" containerName="ceilometer-central-agent" containerID="cri-o://f0c59fc9f81fa0f7500b240f06a3a5b558f415016d7c1f34609ab5dfe30cbc87" gracePeriod=30
Nov 24 13:42:56 crc kubenswrapper[5039]: I1124 13:42:56.322993 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40c70f05-786b-43b9-9bb5-87b6f7907cc1" path="/var/lib/kubelet/pods/40c70f05-786b-43b9-9bb5-87b6f7907cc1/volumes"
Nov 24 13:42:56 crc kubenswrapper[5039]: I1124 13:42:56.831048 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0168214d-ac93-41c7-babc-e048a74fca46","Type":"ContainerStarted","Data":"beb8453fafc170298ec053d21b44afd227725a0f96e46cb229b938bc0a550741"}
Nov 24 13:42:56 crc kubenswrapper[5039]: I1124 13:42:56.831387 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0168214d-ac93-41c7-babc-e048a74fca46","Type":"ContainerStarted","Data":"326e269a5fc812f75e0b480e96e93e3705be740ce6e3c2e02a0726131e4b460c"}
Nov 24 13:42:56 crc kubenswrapper[5039]: I1124 13:42:56.834475 5039 generic.go:334] "Generic (PLEG): container finished" podID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" containerID="e658ae07e319c31fe236230f6a0c9d415d4023e9f542e782897b69e1791ea092" exitCode=0
Nov 24 13:42:56 crc kubenswrapper[5039]: I1124 13:42:56.834496 5039 generic.go:334] "Generic (PLEG): container finished" podID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" containerID="1ab5882346438ed134b9131b22c7c567a8ae07ff56e2310bd8b6145e8b911fb4" exitCode=2
Nov 24 13:42:56 crc kubenswrapper[5039]: I1124 13:42:56.834519 5039 generic.go:334] "Generic (PLEG): container finished" podID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" containerID="f0c59fc9f81fa0f7500b240f06a3a5b558f415016d7c1f34609ab5dfe30cbc87" exitCode=0
Nov 24 13:42:56 crc kubenswrapper[5039]: I1124 13:42:56.834552 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5c3dde36-1fba-4d3f-812f-20c2118aecaa","Type":"ContainerDied","Data":"e658ae07e319c31fe236230f6a0c9d415d4023e9f542e782897b69e1791ea092"}
Nov 24 13:42:56 crc kubenswrapper[5039]: I1124 13:42:56.834588 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5c3dde36-1fba-4d3f-812f-20c2118aecaa","Type":"ContainerDied","Data":"1ab5882346438ed134b9131b22c7c567a8ae07ff56e2310bd8b6145e8b911fb4"}
Nov 24 13:42:56 crc kubenswrapper[5039]: I1124 13:42:56.834601 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5c3dde36-1fba-4d3f-812f-20c2118aecaa","Type":"ContainerDied","Data":"f0c59fc9f81fa0f7500b240f06a3a5b558f415016d7c1f34609ab5dfe30cbc87"}
Nov 24 13:42:56 crc kubenswrapper[5039]: I1124 13:42:56.836142 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443","Type":"ContainerStarted","Data":"224cfb9288ec9a52c6d1ef4b70b3fe444c88318831c75bd87a70ff638dca7a54"}
Nov 24 13:42:56 crc kubenswrapper[5039]: I1124 13:42:56.836193 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443","Type":"ContainerStarted","Data":"d3fb66e806b0ddadf9363c04962e0af179907d8f84096395d6cd1b010e804771"}
Nov 24 13:42:56 crc kubenswrapper[5039]: I1124 13:42:56.836207 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443","Type":"ContainerStarted","Data":"f26ee9d9a1b9d2100d6527528a4c5e6e0e28cd8b94e88143ad37749aa4ad9758"}
Nov 24 13:42:56 crc kubenswrapper[5039]: I1124 13:42:56.868310 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.86828546 podStartE2EDuration="2.86828546s" podCreationTimestamp="2025-11-24 13:42:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:42:56.859326841 +0000 UTC m=+1489.298451351" watchObservedRunningTime="2025-11-24 13:42:56.86828546 +0000 UTC m=+1489.307409960"
Nov 24 13:42:57 crc kubenswrapper[5039]: I1124 13:42:57.849775 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"]
Nov 24 13:42:58 crc kubenswrapper[5039]: I1124 13:42:58.138259 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Nov 24 13:42:58 crc kubenswrapper[5039]: I1124 13:42:58.530396 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 24 13:42:58 crc kubenswrapper[5039]: I1124 13:42:58.531404 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 24 13:42:58 crc kubenswrapper[5039]: I1124 13:42:58.868086 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0168214d-ac93-41c7-babc-e048a74fca46","Type":"ContainerStarted","Data":"9c80abddb304f82db0447974336a724f454f32d5e1537735e5bf4d039cd2ec4c"}
Nov 24 13:42:59 crc kubenswrapper[5039]: I1124 13:42:59.139876 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 24 13:42:59 crc kubenswrapper[5039]: I1124 13:42:59.880547 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0168214d-ac93-41c7-babc-e048a74fca46","Type":"ContainerStarted","Data":"eebe295f1f6a3e8cad4905c2789044b8df990702f9bd0b8b5e85507c6c52e4ed"}
Nov 24 13:43:00 crc kubenswrapper[5039]: I1124 13:43:00.239588 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 24 13:43:00 crc kubenswrapper[5039]: I1124 13:43:00.239764 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="47799b2c-4219-475d-9a09-580720622ee4" containerName="kube-state-metrics" containerID="cri-o://019bf5ada2966b97ed9f89b360fb4923ba460479c6a6eeff6dde749ba67d748a" gracePeriod=30
Nov 24 13:43:00 crc kubenswrapper[5039]: I1124 13:43:00.391386 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"]
Nov 24 13:43:00 crc kubenswrapper[5039]: I1124 13:43:00.391667 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mysqld-exporter-0" podUID="699baf57-b50c-43fd-adc9-7ff6333294df" containerName="mysqld-exporter" containerID="cri-o://22711374ef6575b81dd01aa125a85a2b84b89d45f0761b9a5b7ebedcfbac5fd3" gracePeriod=30
Nov 24 13:43:00 crc kubenswrapper[5039]: I1124 13:43:00.887451 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 24 13:43:00 crc kubenswrapper[5039]: I1124 13:43:00.898407 5039 generic.go:334] "Generic (PLEG): container finished" podID="47799b2c-4219-475d-9a09-580720622ee4" containerID="019bf5ada2966b97ed9f89b360fb4923ba460479c6a6eeff6dde749ba67d748a" exitCode=2
Nov 24 13:43:00 crc kubenswrapper[5039]: I1124 13:43:00.898490 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"47799b2c-4219-475d-9a09-580720622ee4","Type":"ContainerDied","Data":"019bf5ada2966b97ed9f89b360fb4923ba460479c6a6eeff6dde749ba67d748a"}
Nov 24 13:43:00 crc kubenswrapper[5039]: I1124 13:43:00.898531 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"47799b2c-4219-475d-9a09-580720622ee4","Type":"ContainerDied","Data":"13a858bf92c65e2dae183efe0fcd3025ac70c6c5fbd59ad24849564e6647e3cc"}
Nov 24 13:43:00 crc kubenswrapper[5039]: I1124 13:43:00.898548 5039 scope.go:117] "RemoveContainer" containerID="019bf5ada2966b97ed9f89b360fb4923ba460479c6a6eeff6dde749ba67d748a"
Nov 24 13:43:00 crc kubenswrapper[5039]: I1124 13:43:00.898656 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 24 13:43:00 crc kubenswrapper[5039]: I1124 13:43:00.903192 5039 generic.go:334] "Generic (PLEG): container finished" podID="699baf57-b50c-43fd-adc9-7ff6333294df" containerID="22711374ef6575b81dd01aa125a85a2b84b89d45f0761b9a5b7ebedcfbac5fd3" exitCode=2
Nov 24 13:43:00 crc kubenswrapper[5039]: I1124 13:43:00.903233 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"699baf57-b50c-43fd-adc9-7ff6333294df","Type":"ContainerDied","Data":"22711374ef6575b81dd01aa125a85a2b84b89d45f0761b9a5b7ebedcfbac5fd3"}
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.039199 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mk6n2\" (UniqueName: \"kubernetes.io/projected/47799b2c-4219-475d-9a09-580720622ee4-kube-api-access-mk6n2\") pod \"47799b2c-4219-475d-9a09-580720622ee4\" (UID: \"47799b2c-4219-475d-9a09-580720622ee4\") "
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.309519 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47799b2c-4219-475d-9a09-580720622ee4-kube-api-access-mk6n2" (OuterVolumeSpecName: "kube-api-access-mk6n2") pod "47799b2c-4219-475d-9a09-580720622ee4" (UID: "47799b2c-4219-475d-9a09-580720622ee4"). InnerVolumeSpecName "kube-api-access-mk6n2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.343336 5039 scope.go:117] "RemoveContainer" containerID="019bf5ada2966b97ed9f89b360fb4923ba460479c6a6eeff6dde749ba67d748a"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.347141 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mk6n2\" (UniqueName: \"kubernetes.io/projected/47799b2c-4219-475d-9a09-580720622ee4-kube-api-access-mk6n2\") on node \"crc\" DevicePath \"\""
Nov 24 13:43:01 crc kubenswrapper[5039]: E1124 13:43:01.349802 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"019bf5ada2966b97ed9f89b360fb4923ba460479c6a6eeff6dde749ba67d748a\": container with ID starting with 019bf5ada2966b97ed9f89b360fb4923ba460479c6a6eeff6dde749ba67d748a not found: ID does not exist" containerID="019bf5ada2966b97ed9f89b360fb4923ba460479c6a6eeff6dde749ba67d748a"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.349848 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"019bf5ada2966b97ed9f89b360fb4923ba460479c6a6eeff6dde749ba67d748a"} err="failed to get container status \"019bf5ada2966b97ed9f89b360fb4923ba460479c6a6eeff6dde749ba67d748a\": rpc error: code = NotFound desc = could not find container \"019bf5ada2966b97ed9f89b360fb4923ba460479c6a6eeff6dde749ba67d748a\": container with ID starting with 019bf5ada2966b97ed9f89b360fb4923ba460479c6a6eeff6dde749ba67d748a not found: ID does not exist"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.470661 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.607538 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.629712 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.642075 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 24 13:43:01 crc kubenswrapper[5039]: E1124 13:43:01.642633 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="699baf57-b50c-43fd-adc9-7ff6333294df" containerName="mysqld-exporter"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.642651 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="699baf57-b50c-43fd-adc9-7ff6333294df" containerName="mysqld-exporter"
Nov 24 13:43:01 crc kubenswrapper[5039]: E1124 13:43:01.642686 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47799b2c-4219-475d-9a09-580720622ee4" containerName="kube-state-metrics"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.642693 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="47799b2c-4219-475d-9a09-580720622ee4" containerName="kube-state-metrics"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.642954 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="47799b2c-4219-475d-9a09-580720622ee4" containerName="kube-state-metrics"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.642973 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="699baf57-b50c-43fd-adc9-7ff6333294df" containerName="mysqld-exporter"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.643712 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.647133 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.647183 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.661181 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/699baf57-b50c-43fd-adc9-7ff6333294df-combined-ca-bundle\") pod \"699baf57-b50c-43fd-adc9-7ff6333294df\" (UID: \"699baf57-b50c-43fd-adc9-7ff6333294df\") "
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.661270 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/699baf57-b50c-43fd-adc9-7ff6333294df-config-data\") pod \"699baf57-b50c-43fd-adc9-7ff6333294df\" (UID: \"699baf57-b50c-43fd-adc9-7ff6333294df\") "
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.661561 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrcvq\" (UniqueName: \"kubernetes.io/projected/699baf57-b50c-43fd-adc9-7ff6333294df-kube-api-access-wrcvq\") pod \"699baf57-b50c-43fd-adc9-7ff6333294df\" (UID: \"699baf57-b50c-43fd-adc9-7ff6333294df\") "
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.662090 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/07adf2a8-6758-4e5e-b757-6d32eebb1f93-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"07adf2a8-6758-4e5e-b757-6d32eebb1f93\") " pod="openstack/kube-state-metrics-0"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.662164 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlcz8\" (UniqueName: \"kubernetes.io/projected/07adf2a8-6758-4e5e-b757-6d32eebb1f93-kube-api-access-qlcz8\") pod \"kube-state-metrics-0\" (UID: \"07adf2a8-6758-4e5e-b757-6d32eebb1f93\") " pod="openstack/kube-state-metrics-0"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.662338 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07adf2a8-6758-4e5e-b757-6d32eebb1f93-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"07adf2a8-6758-4e5e-b757-6d32eebb1f93\") " pod="openstack/kube-state-metrics-0"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.662421 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/07adf2a8-6758-4e5e-b757-6d32eebb1f93-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"07adf2a8-6758-4e5e-b757-6d32eebb1f93\") " pod="openstack/kube-state-metrics-0"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.662560 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.667436 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/699baf57-b50c-43fd-adc9-7ff6333294df-kube-api-access-wrcvq" (OuterVolumeSpecName: "kube-api-access-wrcvq") pod "699baf57-b50c-43fd-adc9-7ff6333294df" (UID: "699baf57-b50c-43fd-adc9-7ff6333294df"). InnerVolumeSpecName "kube-api-access-wrcvq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.707677 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/699baf57-b50c-43fd-adc9-7ff6333294df-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "699baf57-b50c-43fd-adc9-7ff6333294df" (UID: "699baf57-b50c-43fd-adc9-7ff6333294df"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.738726 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/699baf57-b50c-43fd-adc9-7ff6333294df-config-data" (OuterVolumeSpecName: "config-data") pod "699baf57-b50c-43fd-adc9-7ff6333294df" (UID: "699baf57-b50c-43fd-adc9-7ff6333294df"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.764258 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/07adf2a8-6758-4e5e-b757-6d32eebb1f93-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"07adf2a8-6758-4e5e-b757-6d32eebb1f93\") " pod="openstack/kube-state-metrics-0"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.764484 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/07adf2a8-6758-4e5e-b757-6d32eebb1f93-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"07adf2a8-6758-4e5e-b757-6d32eebb1f93\") " pod="openstack/kube-state-metrics-0"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.764561 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlcz8\" (UniqueName: \"kubernetes.io/projected/07adf2a8-6758-4e5e-b757-6d32eebb1f93-kube-api-access-qlcz8\") pod \"kube-state-metrics-0\" (UID: \"07adf2a8-6758-4e5e-b757-6d32eebb1f93\") " pod="openstack/kube-state-metrics-0"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.764730 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07adf2a8-6758-4e5e-b757-6d32eebb1f93-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"07adf2a8-6758-4e5e-b757-6d32eebb1f93\") " pod="openstack/kube-state-metrics-0"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.766738 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/699baf57-b50c-43fd-adc9-7ff6333294df-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.766922 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/699baf57-b50c-43fd-adc9-7ff6333294df-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.767057 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrcvq\" (UniqueName: \"kubernetes.io/projected/699baf57-b50c-43fd-adc9-7ff6333294df-kube-api-access-wrcvq\") on node \"crc\" DevicePath \"\""
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.770431 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07adf2a8-6758-4e5e-b757-6d32eebb1f93-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"07adf2a8-6758-4e5e-b757-6d32eebb1f93\") " pod="openstack/kube-state-metrics-0"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.770925 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/07adf2a8-6758-4e5e-b757-6d32eebb1f93-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"07adf2a8-6758-4e5e-b757-6d32eebb1f93\") " pod="openstack/kube-state-metrics-0"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.772295 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/07adf2a8-6758-4e5e-b757-6d32eebb1f93-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"07adf2a8-6758-4e5e-b757-6d32eebb1f93\") " pod="openstack/kube-state-metrics-0"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.788144 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlcz8\" (UniqueName: \"kubernetes.io/projected/07adf2a8-6758-4e5e-b757-6d32eebb1f93-kube-api-access-qlcz8\") pod \"kube-state-metrics-0\" (UID: \"07adf2a8-6758-4e5e-b757-6d32eebb1f93\") " pod="openstack/kube-state-metrics-0"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.920197 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"699baf57-b50c-43fd-adc9-7ff6333294df","Type":"ContainerDied","Data":"5916b606db685d7a703ece6fbdec26b788140e571d42253420b43a1c8fa864f4"}
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.920266 5039 scope.go:117] "RemoveContainer" containerID="22711374ef6575b81dd01aa125a85a2b84b89d45f0761b9a5b7ebedcfbac5fd3"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.920223 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.928597 5039 generic.go:334] "Generic (PLEG): container finished" podID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" containerID="c2e6ba544a5a6ed1b9cffc2fb6bab004cf3517825a2d7d6738deb68cda50217f" exitCode=0
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.928667 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5c3dde36-1fba-4d3f-812f-20c2118aecaa","Type":"ContainerDied","Data":"c2e6ba544a5a6ed1b9cffc2fb6bab004cf3517825a2d7d6738deb68cda50217f"}
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.964912 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"]
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.970585 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.977825 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-0"]
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.994615 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"]
Nov 24 13:43:01 crc kubenswrapper[5039]: I1124 13:43:01.998302 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.010090 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-mysqld-exporter-svc"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.012382 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.040699 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"]
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.072555 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ce1afd-e5d8-401a-8fb1-e02b6aff131b-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"52ce1afd-e5d8-401a-8fb1-e02b6aff131b\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.072647 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fj52\" (UniqueName: \"kubernetes.io/projected/52ce1afd-e5d8-401a-8fb1-e02b6aff131b-kube-api-access-7fj52\") pod \"mysqld-exporter-0\" (UID: \"52ce1afd-e5d8-401a-8fb1-e02b6aff131b\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.072722 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ce1afd-e5d8-401a-8fb1-e02b6aff131b-config-data\") pod \"mysqld-exporter-0\" (UID: \"52ce1afd-e5d8-401a-8fb1-e02b6aff131b\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.072976 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/52ce1afd-e5d8-401a-8fb1-e02b6aff131b-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"52ce1afd-e5d8-401a-8fb1-e02b6aff131b\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.175340 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/52ce1afd-e5d8-401a-8fb1-e02b6aff131b-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"52ce1afd-e5d8-401a-8fb1-e02b6aff131b\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.176077 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ce1afd-e5d8-401a-8fb1-e02b6aff131b-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"52ce1afd-e5d8-401a-8fb1-e02b6aff131b\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.176143 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fj52\" (UniqueName: \"kubernetes.io/projected/52ce1afd-e5d8-401a-8fb1-e02b6aff131b-kube-api-access-7fj52\") pod \"mysqld-exporter-0\" (UID: \"52ce1afd-e5d8-401a-8fb1-e02b6aff131b\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.176210 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ce1afd-e5d8-401a-8fb1-e02b6aff131b-config-data\") pod \"mysqld-exporter-0\" (UID: \"52ce1afd-e5d8-401a-8fb1-e02b6aff131b\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.182444 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ce1afd-e5d8-401a-8fb1-e02b6aff131b-config-data\") pod \"mysqld-exporter-0\" (UID: \"52ce1afd-e5d8-401a-8fb1-e02b6aff131b\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.182957 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/52ce1afd-e5d8-401a-8fb1-e02b6aff131b-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"52ce1afd-e5d8-401a-8fb1-e02b6aff131b\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.184015 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ce1afd-e5d8-401a-8fb1-e02b6aff131b-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"52ce1afd-e5d8-401a-8fb1-e02b6aff131b\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.209698 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fj52\" (UniqueName: \"kubernetes.io/projected/52ce1afd-e5d8-401a-8fb1-e02b6aff131b-kube-api-access-7fj52\") pod \"mysqld-exporter-0\" (UID: \"52ce1afd-e5d8-401a-8fb1-e02b6aff131b\") " pod="openstack/mysqld-exporter-0"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.301252 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.329379 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.331368 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47799b2c-4219-475d-9a09-580720622ee4" path="/var/lib/kubelet/pods/47799b2c-4219-475d-9a09-580720622ee4/volumes"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.331951 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="699baf57-b50c-43fd-adc9-7ff6333294df" path="/var/lib/kubelet/pods/699baf57-b50c-43fd-adc9-7ff6333294df/volumes"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.384440 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5c3dde36-1fba-4d3f-812f-20c2118aecaa-run-httpd\") pod \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") "
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.384825 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-config-data\") pod \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") "
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.384858 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-scripts\") pod \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") "
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.384940 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-combined-ca-bundle\") pod \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") "
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.384977 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5c3dde36-1fba-4d3f-812f-20c2118aecaa-log-httpd\") pod \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") "
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.384998 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-sg-core-conf-yaml\") pod \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") "
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.385027 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fvl9\" (UniqueName: \"kubernetes.io/projected/5c3dde36-1fba-4d3f-812f-20c2118aecaa-kube-api-access-2fvl9\") pod \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\" (UID: \"5c3dde36-1fba-4d3f-812f-20c2118aecaa\") "
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.386080 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c3dde36-1fba-4d3f-812f-20c2118aecaa-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5c3dde36-1fba-4d3f-812f-20c2118aecaa" (UID: "5c3dde36-1fba-4d3f-812f-20c2118aecaa"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.390844 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c3dde36-1fba-4d3f-812f-20c2118aecaa-kube-api-access-2fvl9" (OuterVolumeSpecName: "kube-api-access-2fvl9") pod "5c3dde36-1fba-4d3f-812f-20c2118aecaa" (UID: "5c3dde36-1fba-4d3f-812f-20c2118aecaa"). InnerVolumeSpecName "kube-api-access-2fvl9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.391962 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c3dde36-1fba-4d3f-812f-20c2118aecaa-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5c3dde36-1fba-4d3f-812f-20c2118aecaa" (UID: "5c3dde36-1fba-4d3f-812f-20c2118aecaa"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.394197 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-scripts" (OuterVolumeSpecName: "scripts") pod "5c3dde36-1fba-4d3f-812f-20c2118aecaa" (UID: "5c3dde36-1fba-4d3f-812f-20c2118aecaa"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.425632 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5c3dde36-1fba-4d3f-812f-20c2118aecaa" (UID: "5c3dde36-1fba-4d3f-812f-20c2118aecaa"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.487641 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5c3dde36-1fba-4d3f-812f-20c2118aecaa" (UID: "5c3dde36-1fba-4d3f-812f-20c2118aecaa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.488806 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.488833 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.488846 5039 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5c3dde36-1fba-4d3f-812f-20c2118aecaa-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.488860 5039 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.488870 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2fvl9\" (UniqueName: \"kubernetes.io/projected/5c3dde36-1fba-4d3f-812f-20c2118aecaa-kube-api-access-2fvl9\") on node \"crc\" DevicePath \"\""
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.488884 5039 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5c3dde36-1fba-4d3f-812f-20c2118aecaa-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.542690 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-config-data" (OuterVolumeSpecName: "config-data") pod "5c3dde36-1fba-4d3f-812f-20c2118aecaa" (UID: "5c3dde36-1fba-4d3f-812f-20c2118aecaa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.591233 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c3dde36-1fba-4d3f-812f-20c2118aecaa-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.612542 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 24 13:43:02 crc kubenswrapper[5039]: W1124 13:43:02.827689 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod52ce1afd_e5d8_401a_8fb1_e02b6aff131b.slice/crio-ab667350a0654cb3e3d123d57acda2b164df31d68f8d6cb15d8431ce51f09e85 WatchSource:0}: Error finding container ab667350a0654cb3e3d123d57acda2b164df31d68f8d6cb15d8431ce51f09e85: Status 404 returned error can't find the container with id ab667350a0654cb3e3d123d57acda2b164df31d68f8d6cb15d8431ce51f09e85
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.829019 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"]
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.942364 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"07adf2a8-6758-4e5e-b757-6d32eebb1f93","Type":"ContainerStarted","Data":"c4d98023ea29949b84b2f0e8333debfd329512f78b83cd9b8e61384ab3600d23"}
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.946374 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0168214d-ac93-41c7-babc-e048a74fca46","Type":"ContainerStarted","Data":"40d5dd8ff768e378422a7e4617d363fa85e7c9000cf7dab745e477474cccec47"}
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.946585 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="0168214d-ac93-41c7-babc-e048a74fca46" containerName="aodh-api" containerID="cri-o://beb8453fafc170298ec053d21b44afd227725a0f96e46cb229b938bc0a550741" gracePeriod=30
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.947162 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="0168214d-ac93-41c7-babc-e048a74fca46" containerName="aodh-listener" containerID="cri-o://40d5dd8ff768e378422a7e4617d363fa85e7c9000cf7dab745e477474cccec47" gracePeriod=30
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.947262 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="0168214d-ac93-41c7-babc-e048a74fca46" containerName="aodh-notifier" containerID="cri-o://eebe295f1f6a3e8cad4905c2789044b8df990702f9bd0b8b5e85507c6c52e4ed" gracePeriod=30
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.947312 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="0168214d-ac93-41c7-babc-e048a74fca46" containerName="aodh-evaluator" containerID="cri-o://9c80abddb304f82db0447974336a724f454f32d5e1537735e5bf4d039cd2ec4c" gracePeriod=30
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.951715 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.951738 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5c3dde36-1fba-4d3f-812f-20c2118aecaa","Type":"ContainerDied","Data":"ac3ab405f52449a5ef47711971cab5c27f7f68ba329daf474446c7d97daa6d9b"}
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.951934 5039 scope.go:117] "RemoveContainer" containerID="e658ae07e319c31fe236230f6a0c9d415d4023e9f542e782897b69e1791ea092"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.954804 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"52ce1afd-e5d8-401a-8fb1-e02b6aff131b","Type":"ContainerStarted","Data":"ab667350a0654cb3e3d123d57acda2b164df31d68f8d6cb15d8431ce51f09e85"}
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.979956 5039 scope.go:117] "RemoveContainer" containerID="1ab5882346438ed134b9131b22c7c567a8ae07ff56e2310bd8b6145e8b911fb4"
Nov 24 13:43:02 crc kubenswrapper[5039]: I1124 13:43:02.984785 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=3.893953024 podStartE2EDuration="9.984764896s" podCreationTimestamp="2025-11-24 13:42:53 +0000 UTC" firstStartedPulling="2025-11-24 13:42:55.93764475 +0000 UTC m=+1488.376769250" lastFinishedPulling="2025-11-24 13:43:02.028456612 +0000 UTC m=+1494.467581122" observedRunningTime="2025-11-24 13:43:02.978852692 +0000 UTC m=+1495.417977192" watchObservedRunningTime="2025-11-24 13:43:02.984764896 +0000 UTC m=+1495.423889396"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.059942 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.084803 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.086528 5039 scope.go:117] "RemoveContainer" containerID="c2e6ba544a5a6ed1b9cffc2fb6bab004cf3517825a2d7d6738deb68cda50217f"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.099796 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 24 13:43:03 crc kubenswrapper[5039]: E1124 13:43:03.100559 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" containerName="proxy-httpd"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.100776 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" containerName="proxy-httpd"
Nov 24 13:43:03 crc kubenswrapper[5039]: E1124 13:43:03.100797 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" containerName="ceilometer-central-agent"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.100805 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" containerName="ceilometer-central-agent"
Nov 24 13:43:03 crc kubenswrapper[5039]: E1124 13:43:03.100828 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" containerName="ceilometer-notification-agent"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.100836 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" containerName="ceilometer-notification-agent"
Nov 24 13:43:03 crc kubenswrapper[5039]: E1124 13:43:03.100871 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" containerName="sg-core"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.100879 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" containerName="sg-core"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.101150 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" containerName="ceilometer-central-agent"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.101178 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" containerName="sg-core"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.101196 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" containerName="proxy-httpd"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.101221 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" containerName="ceilometer-notification-agent"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.103771 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.109129 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a180464-260b-456c-bb96-c83c69cd2258-run-httpd\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.109193 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.109249 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghtwl\" (UniqueName: \"kubernetes.io/projected/2a180464-260b-456c-bb96-c83c69cd2258-kube-api-access-ghtwl\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.109271 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a180464-260b-456c-bb96-c83c69cd2258-log-httpd\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.109442 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-config-data\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.109484 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-scripts\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.109585 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.111387 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.111958 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.117088 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.211799 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-config-data\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.211859 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-scripts\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.211908 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.211944 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a180464-260b-456c-bb96-c83c69cd2258-run-httpd\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.211978 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.212030 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghtwl\" (UniqueName: \"kubernetes.io/projected/2a180464-260b-456c-bb96-c83c69cd2258-kube-api-access-ghtwl\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.212052 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a180464-260b-456c-bb96-c83c69cd2258-log-httpd\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.212912 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a180464-260b-456c-bb96-c83c69cd2258-log-httpd\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.212996 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a180464-260b-456c-bb96-c83c69cd2258-run-httpd\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.229290 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.229722 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-config-data\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.248207 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-scripts\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.251874 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.254056 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghtwl\" (UniqueName: \"kubernetes.io/projected/2a180464-260b-456c-bb96-c83c69cd2258-kube-api-access-ghtwl\") pod \"ceilometer-0\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.273900 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.275904 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.295483 5039 scope.go:117] "RemoveContainer" containerID="f0c59fc9f81fa0f7500b240f06a3a5b558f415016d7c1f34609ab5dfe30cbc87"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.533494 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.534140 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.850961 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 24 13:43:03 crc kubenswrapper[5039]: W1124 13:43:03.868031 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a180464_260b_456c_bb96_c83c69cd2258.slice/crio-92bf6ee72ff68a7a7298ff2aef698fb4a54e003083f0b9360c1eb721ffab2693 WatchSource:0}: Error finding container 92bf6ee72ff68a7a7298ff2aef698fb4a54e003083f0b9360c1eb721ffab2693: Status 404 returned error can't find the container with id 92bf6ee72ff68a7a7298ff2aef698fb4a54e003083f0b9360c1eb721ffab2693
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.969937 5039 generic.go:334] "Generic (PLEG): container finished" podID="0168214d-ac93-41c7-babc-e048a74fca46" containerID="9c80abddb304f82db0447974336a724f454f32d5e1537735e5bf4d039cd2ec4c" exitCode=0
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.969976 5039 generic.go:334] "Generic (PLEG): container finished" podID="0168214d-ac93-41c7-babc-e048a74fca46" containerID="beb8453fafc170298ec053d21b44afd227725a0f96e46cb229b938bc0a550741" exitCode=0
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.969986 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0168214d-ac93-41c7-babc-e048a74fca46","Type":"ContainerDied","Data":"9c80abddb304f82db0447974336a724f454f32d5e1537735e5bf4d039cd2ec4c"}
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.970027 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0168214d-ac93-41c7-babc-e048a74fca46","Type":"ContainerDied","Data":"beb8453fafc170298ec053d21b44afd227725a0f96e46cb229b938bc0a550741"}
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.971555 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a180464-260b-456c-bb96-c83c69cd2258","Type":"ContainerStarted","Data":"92bf6ee72ff68a7a7298ff2aef698fb4a54e003083f0b9360c1eb721ffab2693"}
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.973279 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"52ce1afd-e5d8-401a-8fb1-e02b6aff131b","Type":"ContainerStarted","Data":"c0d3407d6d34c475856aeac44fd31ecd5403df4d352999f66f1ab9071fef9b10"}
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.974837 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"07adf2a8-6758-4e5e-b757-6d32eebb1f93","Type":"ContainerStarted","Data":"c68a3d4782aa6c82cf63e754cb767a7664a7cdeb7564d9ce0fcc32fee733ce51"}
Nov 24 13:43:03 crc kubenswrapper[5039]: I1124 13:43:03.975012 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Nov 24 13:43:04 crc kubenswrapper[5039]: I1124 13:43:04.009719 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=2.409780219 podStartE2EDuration="3.009697379s" podCreationTimestamp="2025-11-24 13:43:01 +0000 UTC" firstStartedPulling="2025-11-24 13:43:02.829990012 +0000 UTC m=+1495.269114512" lastFinishedPulling="2025-11-24 13:43:03.429907172 +0000 UTC m=+1495.869031672" observedRunningTime="2025-11-24 13:43:03.99134241 +0000 UTC m=+1496.430466910" watchObservedRunningTime="2025-11-24 13:43:04.009697379 +0000 UTC m=+1496.448821879"
Nov 24 13:43:04 crc kubenswrapper[5039]: I1124 13:43:04.017193 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.539161625 podStartE2EDuration="3.017173613s" podCreationTimestamp="2025-11-24 13:43:01 +0000 UTC" firstStartedPulling="2025-11-24 13:43:02.608732512 +0000 UTC m=+1495.047857022" lastFinishedPulling="2025-11-24 13:43:03.08674452 +0000 UTC m=+1495.525869010" observedRunningTime="2025-11-24 13:43:04.010480709 +0000 UTC m=+1496.449605219" watchObservedRunningTime="2025-11-24 13:43:04.017173613 +0000 UTC m=+1496.456298113"
Nov 24 13:43:04 crc kubenswrapper[5039]: I1124 13:43:04.149747 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 24 13:43:04 crc kubenswrapper[5039]: I1124 13:43:04.188812 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 24 13:43:04 crc kubenswrapper[5039]: I1124 13:43:04.317963 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c3dde36-1fba-4d3f-812f-20c2118aecaa" path="/var/lib/kubelet/pods/5c3dde36-1fba-4d3f-812f-20c2118aecaa/volumes"
Nov 24 13:43:04 crc kubenswrapper[5039]: I1124 13:43:04.566757 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.233:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 24 13:43:04 crc kubenswrapper[5039]: I1124 13:43:04.566761 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.233:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 24 13:43:04 crc kubenswrapper[5039]: I1124 13:43:04.994335 5039 generic.go:334] "Generic (PLEG): container finished" podID="0168214d-ac93-41c7-babc-e048a74fca46" containerID="eebe295f1f6a3e8cad4905c2789044b8df990702f9bd0b8b5e85507c6c52e4ed" exitCode=0
Nov 24 13:43:04 crc kubenswrapper[5039]: I1124 13:43:04.994444 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0168214d-ac93-41c7-babc-e048a74fca46","Type":"ContainerDied","Data":"eebe295f1f6a3e8cad4905c2789044b8df990702f9bd0b8b5e85507c6c52e4ed"}
Nov 24 13:43:04 crc kubenswrapper[5039]: I1124 13:43:04.998856 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a180464-260b-456c-bb96-c83c69cd2258","Type":"ContainerStarted","Data":"1fc155fd6c20a08dc1d830dfb10c1e6a7031af7d1ac70dfa1320ea045ec36c15"}
Nov 24 13:43:05 crc kubenswrapper[5039]: I1124 13:43:05.268131 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 24 13:43:05 crc kubenswrapper[5039]: I1124 13:43:05.369568 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 24 13:43:05 crc kubenswrapper[5039]: I1124 13:43:05.369619 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 24 13:43:06 crc kubenswrapper[5039]: I1124 13:43:06.010693 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a180464-260b-456c-bb96-c83c69cd2258","Type":"ContainerStarted","Data":"5a2eb444843b3cbf316ddee87e72fa637ebe34b8691a6784f88fd000126d4199"}
Nov 24 13:43:06 crc kubenswrapper[5039]: I1124 13:43:06.411677 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.236:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 24 13:43:06 crc kubenswrapper[5039]: I1124 13:43:06.457703 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.236:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 24 13:43:07 crc kubenswrapper[5039]: I1124 13:43:07.022760 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a180464-260b-456c-bb96-c83c69cd2258","Type":"ContainerStarted","Data":"79ed7e8384e791de05e91f18b762eadd3466a7bd5f526803d193c69153cd418d"}
Nov 24 13:43:08 crc kubenswrapper[5039]: I1124 13:43:08.035240 5039 generic.go:334] "Generic (PLEG): container finished" podID="2a180464-260b-456c-bb96-c83c69cd2258" containerID="485316d66402a90fce343fa4d987881873a566b586c857bcd9180834bf4083f2" exitCode=1
Nov 24 13:43:08 crc kubenswrapper[5039]: I1124 13:43:08.035280 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a180464-260b-456c-bb96-c83c69cd2258","Type":"ContainerDied","Data":"485316d66402a90fce343fa4d987881873a566b586c857bcd9180834bf4083f2"}
Nov 24 13:43:08 crc kubenswrapper[5039]: I1124 13:43:08.035386 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2a180464-260b-456c-bb96-c83c69cd2258" containerName="ceilometer-central-agent" containerID="cri-o://1fc155fd6c20a08dc1d830dfb10c1e6a7031af7d1ac70dfa1320ea045ec36c15" gracePeriod=30
Nov 24 13:43:08 crc kubenswrapper[5039]: I1124 13:43:08.035418 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2a180464-260b-456c-bb96-c83c69cd2258" containerName="ceilometer-notification-agent" containerID="cri-o://5a2eb444843b3cbf316ddee87e72fa637ebe34b8691a6784f88fd000126d4199" gracePeriod=30
Nov 24 13:43:08 crc kubenswrapper[5039]: I1124 13:43:08.035462 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2a180464-260b-456c-bb96-c83c69cd2258" containerName="sg-core" containerID="cri-o://79ed7e8384e791de05e91f18b762eadd3466a7bd5f526803d193c69153cd418d" gracePeriod=30
Nov 24 13:43:09 crc kubenswrapper[5039]: I1124 13:43:09.049159 5039 generic.go:334] "Generic (PLEG): container finished" podID="2a180464-260b-456c-bb96-c83c69cd2258" containerID="79ed7e8384e791de05e91f18b762eadd3466a7bd5f526803d193c69153cd418d" exitCode=2
container finished" podID="2a180464-260b-456c-bb96-c83c69cd2258" containerID="5a2eb444843b3cbf316ddee87e72fa637ebe34b8691a6784f88fd000126d4199" exitCode=0 Nov 24 13:43:09 crc kubenswrapper[5039]: I1124 13:43:09.049269 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a180464-260b-456c-bb96-c83c69cd2258","Type":"ContainerDied","Data":"79ed7e8384e791de05e91f18b762eadd3466a7bd5f526803d193c69153cd418d"} Nov 24 13:43:09 crc kubenswrapper[5039]: I1124 13:43:09.049541 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a180464-260b-456c-bb96-c83c69cd2258","Type":"ContainerDied","Data":"5a2eb444843b3cbf316ddee87e72fa637ebe34b8691a6784f88fd000126d4199"} Nov 24 13:43:10 crc kubenswrapper[5039]: I1124 13:43:10.797186 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:43:10 crc kubenswrapper[5039]: I1124 13:43:10.926194 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ghtwl\" (UniqueName: \"kubernetes.io/projected/2a180464-260b-456c-bb96-c83c69cd2258-kube-api-access-ghtwl\") pod \"2a180464-260b-456c-bb96-c83c69cd2258\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " Nov 24 13:43:10 crc kubenswrapper[5039]: I1124 13:43:10.926531 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-config-data\") pod \"2a180464-260b-456c-bb96-c83c69cd2258\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " Nov 24 13:43:10 crc kubenswrapper[5039]: I1124 13:43:10.926632 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a180464-260b-456c-bb96-c83c69cd2258-run-httpd\") pod \"2a180464-260b-456c-bb96-c83c69cd2258\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " Nov 24 13:43:10 crc kubenswrapper[5039]: I1124 13:43:10.926672 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a180464-260b-456c-bb96-c83c69cd2258-log-httpd\") pod \"2a180464-260b-456c-bb96-c83c69cd2258\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " Nov 24 13:43:10 crc kubenswrapper[5039]: I1124 13:43:10.926783 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-combined-ca-bundle\") pod \"2a180464-260b-456c-bb96-c83c69cd2258\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " Nov 24 13:43:10 crc kubenswrapper[5039]: I1124 13:43:10.926868 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-scripts\") pod \"2a180464-260b-456c-bb96-c83c69cd2258\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " Nov 24 13:43:10 crc kubenswrapper[5039]: I1124 13:43:10.926906 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-sg-core-conf-yaml\") pod \"2a180464-260b-456c-bb96-c83c69cd2258\" (UID: \"2a180464-260b-456c-bb96-c83c69cd2258\") " Nov 24 13:43:10 crc kubenswrapper[5039]: I1124 13:43:10.927634 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/2a180464-260b-456c-bb96-c83c69cd2258-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "2a180464-260b-456c-bb96-c83c69cd2258" (UID: "2a180464-260b-456c-bb96-c83c69cd2258"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:43:10 crc kubenswrapper[5039]: I1124 13:43:10.928059 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a180464-260b-456c-bb96-c83c69cd2258-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "2a180464-260b-456c-bb96-c83c69cd2258" (UID: "2a180464-260b-456c-bb96-c83c69cd2258"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:43:10 crc kubenswrapper[5039]: I1124 13:43:10.931994 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a180464-260b-456c-bb96-c83c69cd2258-kube-api-access-ghtwl" (OuterVolumeSpecName: "kube-api-access-ghtwl") pod "2a180464-260b-456c-bb96-c83c69cd2258" (UID: "2a180464-260b-456c-bb96-c83c69cd2258"). InnerVolumeSpecName "kube-api-access-ghtwl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:43:10 crc kubenswrapper[5039]: I1124 13:43:10.932902 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-scripts" (OuterVolumeSpecName: "scripts") pod "2a180464-260b-456c-bb96-c83c69cd2258" (UID: "2a180464-260b-456c-bb96-c83c69cd2258"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:10 crc kubenswrapper[5039]: I1124 13:43:10.974579 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "2a180464-260b-456c-bb96-c83c69cd2258" (UID: "2a180464-260b-456c-bb96-c83c69cd2258"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.024467 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2a180464-260b-456c-bb96-c83c69cd2258" (UID: "2a180464-260b-456c-bb96-c83c69cd2258"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.028890 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ghtwl\" (UniqueName: \"kubernetes.io/projected/2a180464-260b-456c-bb96-c83c69cd2258-kube-api-access-ghtwl\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.028923 5039 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a180464-260b-456c-bb96-c83c69cd2258-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.028935 5039 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a180464-260b-456c-bb96-c83c69cd2258-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.028945 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.028953 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.028962 5039 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.048123 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-config-data" (OuterVolumeSpecName: "config-data") pod "2a180464-260b-456c-bb96-c83c69cd2258" (UID: "2a180464-260b-456c-bb96-c83c69cd2258"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.071216 5039 generic.go:334] "Generic (PLEG): container finished" podID="2a180464-260b-456c-bb96-c83c69cd2258" containerID="1fc155fd6c20a08dc1d830dfb10c1e6a7031af7d1ac70dfa1320ea045ec36c15" exitCode=0 Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.071263 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a180464-260b-456c-bb96-c83c69cd2258","Type":"ContainerDied","Data":"1fc155fd6c20a08dc1d830dfb10c1e6a7031af7d1ac70dfa1320ea045ec36c15"} Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.071290 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a180464-260b-456c-bb96-c83c69cd2258","Type":"ContainerDied","Data":"92bf6ee72ff68a7a7298ff2aef698fb4a54e003083f0b9360c1eb721ffab2693"} Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.071296 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.071309 5039 scope.go:117] "RemoveContainer" containerID="485316d66402a90fce343fa4d987881873a566b586c857bcd9180834bf4083f2" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.110961 5039 scope.go:117] "RemoveContainer" containerID="79ed7e8384e791de05e91f18b762eadd3466a7bd5f526803d193c69153cd418d" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.117698 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.131105 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.131173 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a180464-260b-456c-bb96-c83c69cd2258-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.149344 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:43:11 crc kubenswrapper[5039]: E1124 13:43:11.149918 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a180464-260b-456c-bb96-c83c69cd2258" containerName="proxy-httpd" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.149941 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a180464-260b-456c-bb96-c83c69cd2258" containerName="proxy-httpd" Nov 24 13:43:11 crc kubenswrapper[5039]: E1124 13:43:11.149982 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a180464-260b-456c-bb96-c83c69cd2258" containerName="ceilometer-central-agent" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.149990 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a180464-260b-456c-bb96-c83c69cd2258" containerName="ceilometer-central-agent" Nov 24 13:43:11 crc kubenswrapper[5039]: E1124 13:43:11.150018 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a180464-260b-456c-bb96-c83c69cd2258" containerName="sg-core" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.150026 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a180464-260b-456c-bb96-c83c69cd2258" containerName="sg-core" Nov 24 13:43:11 crc kubenswrapper[5039]: E1124 13:43:11.150049 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a180464-260b-456c-bb96-c83c69cd2258" containerName="ceilometer-notification-agent" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.150056 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a180464-260b-456c-bb96-c83c69cd2258" containerName="ceilometer-notification-agent" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.150288 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a180464-260b-456c-bb96-c83c69cd2258" containerName="proxy-httpd" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.150316 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a180464-260b-456c-bb96-c83c69cd2258" containerName="sg-core" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.150334 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a180464-260b-456c-bb96-c83c69cd2258" containerName="ceilometer-central-agent" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.150362 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a180464-260b-456c-bb96-c83c69cd2258" containerName="ceilometer-notification-agent" Nov 24 
13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.152959 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.156176 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.156565 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.163820 5039 scope.go:117] "RemoveContainer" containerID="5a2eb444843b3cbf316ddee87e72fa637ebe34b8691a6784f88fd000126d4199" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.164296 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.184809 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.196658 5039 scope.go:117] "RemoveContainer" containerID="1fc155fd6c20a08dc1d830dfb10c1e6a7031af7d1ac70dfa1320ea045ec36c15" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.222395 5039 scope.go:117] "RemoveContainer" containerID="485316d66402a90fce343fa4d987881873a566b586c857bcd9180834bf4083f2" Nov 24 13:43:11 crc kubenswrapper[5039]: E1124 13:43:11.222867 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"485316d66402a90fce343fa4d987881873a566b586c857bcd9180834bf4083f2\": container with ID starting with 485316d66402a90fce343fa4d987881873a566b586c857bcd9180834bf4083f2 not found: ID does not exist" containerID="485316d66402a90fce343fa4d987881873a566b586c857bcd9180834bf4083f2" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.222919 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"485316d66402a90fce343fa4d987881873a566b586c857bcd9180834bf4083f2"} err="failed to get container status \"485316d66402a90fce343fa4d987881873a566b586c857bcd9180834bf4083f2\": rpc error: code = NotFound desc = could not find container \"485316d66402a90fce343fa4d987881873a566b586c857bcd9180834bf4083f2\": container with ID starting with 485316d66402a90fce343fa4d987881873a566b586c857bcd9180834bf4083f2 not found: ID does not exist" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.222955 5039 scope.go:117] "RemoveContainer" containerID="79ed7e8384e791de05e91f18b762eadd3466a7bd5f526803d193c69153cd418d" Nov 24 13:43:11 crc kubenswrapper[5039]: E1124 13:43:11.223317 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79ed7e8384e791de05e91f18b762eadd3466a7bd5f526803d193c69153cd418d\": container with ID starting with 79ed7e8384e791de05e91f18b762eadd3466a7bd5f526803d193c69153cd418d not found: ID does not exist" containerID="79ed7e8384e791de05e91f18b762eadd3466a7bd5f526803d193c69153cd418d" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.223341 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79ed7e8384e791de05e91f18b762eadd3466a7bd5f526803d193c69153cd418d"} err="failed to get container status \"79ed7e8384e791de05e91f18b762eadd3466a7bd5f526803d193c69153cd418d\": rpc error: code = NotFound desc = could not find container \"79ed7e8384e791de05e91f18b762eadd3466a7bd5f526803d193c69153cd418d\": container with ID starting with 
79ed7e8384e791de05e91f18b762eadd3466a7bd5f526803d193c69153cd418d not found: ID does not exist" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.223354 5039 scope.go:117] "RemoveContainer" containerID="5a2eb444843b3cbf316ddee87e72fa637ebe34b8691a6784f88fd000126d4199" Nov 24 13:43:11 crc kubenswrapper[5039]: E1124 13:43:11.224280 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a2eb444843b3cbf316ddee87e72fa637ebe34b8691a6784f88fd000126d4199\": container with ID starting with 5a2eb444843b3cbf316ddee87e72fa637ebe34b8691a6784f88fd000126d4199 not found: ID does not exist" containerID="5a2eb444843b3cbf316ddee87e72fa637ebe34b8691a6784f88fd000126d4199" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.224319 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a2eb444843b3cbf316ddee87e72fa637ebe34b8691a6784f88fd000126d4199"} err="failed to get container status \"5a2eb444843b3cbf316ddee87e72fa637ebe34b8691a6784f88fd000126d4199\": rpc error: code = NotFound desc = could not find container \"5a2eb444843b3cbf316ddee87e72fa637ebe34b8691a6784f88fd000126d4199\": container with ID starting with 5a2eb444843b3cbf316ddee87e72fa637ebe34b8691a6784f88fd000126d4199 not found: ID does not exist" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.224345 5039 scope.go:117] "RemoveContainer" containerID="1fc155fd6c20a08dc1d830dfb10c1e6a7031af7d1ac70dfa1320ea045ec36c15" Nov 24 13:43:11 crc kubenswrapper[5039]: E1124 13:43:11.224623 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1fc155fd6c20a08dc1d830dfb10c1e6a7031af7d1ac70dfa1320ea045ec36c15\": container with ID starting with 1fc155fd6c20a08dc1d830dfb10c1e6a7031af7d1ac70dfa1320ea045ec36c15 not found: ID does not exist" containerID="1fc155fd6c20a08dc1d830dfb10c1e6a7031af7d1ac70dfa1320ea045ec36c15" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.224647 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fc155fd6c20a08dc1d830dfb10c1e6a7031af7d1ac70dfa1320ea045ec36c15"} err="failed to get container status \"1fc155fd6c20a08dc1d830dfb10c1e6a7031af7d1ac70dfa1320ea045ec36c15\": rpc error: code = NotFound desc = could not find container \"1fc155fd6c20a08dc1d830dfb10c1e6a7031af7d1ac70dfa1320ea045ec36c15\": container with ID starting with 1fc155fd6c20a08dc1d830dfb10c1e6a7031af7d1ac70dfa1320ea045ec36c15 not found: ID does not exist" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.336076 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-scripts\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.336169 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3e57550d-0004-4f59-882e-557913349848-log-httpd\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.336219 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-config-data\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.336319 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qcxmt\" (UniqueName: \"kubernetes.io/projected/3e57550d-0004-4f59-882e-557913349848-kube-api-access-qcxmt\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.336438 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3e57550d-0004-4f59-882e-557913349848-run-httpd\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.336555 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.336648 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.336749 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.438054 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-scripts\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.438130 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3e57550d-0004-4f59-882e-557913349848-log-httpd\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.438176 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-config-data\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.438291 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxmt\" (UniqueName: \"kubernetes.io/projected/3e57550d-0004-4f59-882e-557913349848-kube-api-access-qcxmt\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.438366 5039 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3e57550d-0004-4f59-882e-557913349848-run-httpd\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.438384 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.438430 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.438470 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.438730 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3e57550d-0004-4f59-882e-557913349848-log-httpd\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.439139 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3e57550d-0004-4f59-882e-557913349848-run-httpd\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.441906 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-scripts\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.442277 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.442362 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-config-data\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.443190 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.445162 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.456114 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcxmt\" (UniqueName: \"kubernetes.io/projected/3e57550d-0004-4f59-882e-557913349848-kube-api-access-qcxmt\") pod \"ceilometer-0\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.476921 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.945057 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:43:11 crc kubenswrapper[5039]: I1124 13:43:11.988281 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 24 13:43:12 crc kubenswrapper[5039]: I1124 13:43:12.082167 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3e57550d-0004-4f59-882e-557913349848","Type":"ContainerStarted","Data":"c29cf48c0649be699f3564de352af89816933499016e10bc574d5cc98d711de2"} Nov 24 13:43:12 crc kubenswrapper[5039]: I1124 13:43:12.319832 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a180464-260b-456c-bb96-c83c69cd2258" path="/var/lib/kubelet/pods/2a180464-260b-456c-bb96-c83c69cd2258/volumes" Nov 24 13:43:13 crc kubenswrapper[5039]: I1124 13:43:13.094924 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3e57550d-0004-4f59-882e-557913349848","Type":"ContainerStarted","Data":"6cab88ab1dd4d6b5b29438fcfa20b3f8bc01e77ed5bce0f58d8e1e900e136d70"} Nov 24 13:43:13 crc kubenswrapper[5039]: I1124 13:43:13.537450 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 24 13:43:13 crc kubenswrapper[5039]: I1124 13:43:13.544964 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 24 13:43:13 crc kubenswrapper[5039]: I1124 13:43:13.547570 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 24 13:43:14 crc kubenswrapper[5039]: I1124 13:43:14.107853 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3e57550d-0004-4f59-882e-557913349848","Type":"ContainerStarted","Data":"14a23fd2b0668280c9cff5bff206b8f2064163145c8d6c72da534b54843c8513"} Nov 24 13:43:14 crc kubenswrapper[5039]: I1124 13:43:14.108184 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3e57550d-0004-4f59-882e-557913349848","Type":"ContainerStarted","Data":"f9d3f3b2392c27e3e34fb8adadf1e5bf2ac1c29f088fcee9fc688fba80c682da"} Nov 24 13:43:14 crc kubenswrapper[5039]: I1124 13:43:14.115378 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 24 13:43:15 crc kubenswrapper[5039]: I1124 13:43:15.372595 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 24 13:43:15 crc kubenswrapper[5039]: I1124 13:43:15.373622 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 24 13:43:15 crc 
kubenswrapper[5039]: I1124 13:43:15.376892 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 24 13:43:15 crc kubenswrapper[5039]: I1124 13:43:15.377046 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 24 13:43:15 crc kubenswrapper[5039]: I1124 13:43:15.953068 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.039051 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mrg8z\" (UniqueName: \"kubernetes.io/projected/8e0d43dd-60a3-4898-b9ef-b2377a357dee-kube-api-access-mrg8z\") pod \"8e0d43dd-60a3-4898-b9ef-b2377a357dee\" (UID: \"8e0d43dd-60a3-4898-b9ef-b2377a357dee\") " Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.039148 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e0d43dd-60a3-4898-b9ef-b2377a357dee-config-data\") pod \"8e0d43dd-60a3-4898-b9ef-b2377a357dee\" (UID: \"8e0d43dd-60a3-4898-b9ef-b2377a357dee\") " Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.039338 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e0d43dd-60a3-4898-b9ef-b2377a357dee-combined-ca-bundle\") pod \"8e0d43dd-60a3-4898-b9ef-b2377a357dee\" (UID: \"8e0d43dd-60a3-4898-b9ef-b2377a357dee\") " Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.044819 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e0d43dd-60a3-4898-b9ef-b2377a357dee-kube-api-access-mrg8z" (OuterVolumeSpecName: "kube-api-access-mrg8z") pod "8e0d43dd-60a3-4898-b9ef-b2377a357dee" (UID: "8e0d43dd-60a3-4898-b9ef-b2377a357dee"). InnerVolumeSpecName "kube-api-access-mrg8z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.071706 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e0d43dd-60a3-4898-b9ef-b2377a357dee-config-data" (OuterVolumeSpecName: "config-data") pod "8e0d43dd-60a3-4898-b9ef-b2377a357dee" (UID: "8e0d43dd-60a3-4898-b9ef-b2377a357dee"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.076082 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e0d43dd-60a3-4898-b9ef-b2377a357dee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8e0d43dd-60a3-4898-b9ef-b2377a357dee" (UID: "8e0d43dd-60a3-4898-b9ef-b2377a357dee"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.141908 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3e57550d-0004-4f59-882e-557913349848","Type":"ContainerStarted","Data":"c8e6ca3373445b885495c93b8a540a08075131dd92cd669c4eafd8d93e3f329a"} Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.143540 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.144670 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e0d43dd-60a3-4898-b9ef-b2377a357dee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.144715 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mrg8z\" (UniqueName: \"kubernetes.io/projected/8e0d43dd-60a3-4898-b9ef-b2377a357dee-kube-api-access-mrg8z\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.144729 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e0d43dd-60a3-4898-b9ef-b2377a357dee-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.147549 5039 generic.go:334] "Generic (PLEG): container finished" podID="8e0d43dd-60a3-4898-b9ef-b2377a357dee" containerID="e7a7ba5c49f56245d5f301800daf3e5f4976fdcf6558b6a705ef6b177a1a7214" exitCode=137 Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.147630 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"8e0d43dd-60a3-4898-b9ef-b2377a357dee","Type":"ContainerDied","Data":"e7a7ba5c49f56245d5f301800daf3e5f4976fdcf6558b6a705ef6b177a1a7214"} Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.147667 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"8e0d43dd-60a3-4898-b9ef-b2377a357dee","Type":"ContainerDied","Data":"7447c540250dd333b391d415e1dcde27777e50eb15c922d48a763cba4cd242a2"} Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.147683 5039 scope.go:117] "RemoveContainer" containerID="e7a7ba5c49f56245d5f301800daf3e5f4976fdcf6558b6a705ef6b177a1a7214" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.147842 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.149027 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.156592 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.169060 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.083293107 podStartE2EDuration="5.169034143s" podCreationTimestamp="2025-11-24 13:43:11 +0000 UTC" firstStartedPulling="2025-11-24 13:43:11.946660002 +0000 UTC m=+1504.385784512" lastFinishedPulling="2025-11-24 13:43:15.032401048 +0000 UTC m=+1507.471525548" observedRunningTime="2025-11-24 13:43:16.166786197 +0000 UTC m=+1508.605919038" watchObservedRunningTime="2025-11-24 13:43:16.169034143 +0000 UTC m=+1508.608158643" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.190735 5039 scope.go:117] "RemoveContainer" containerID="e7a7ba5c49f56245d5f301800daf3e5f4976fdcf6558b6a705ef6b177a1a7214" Nov 24 13:43:16 crc kubenswrapper[5039]: E1124 13:43:16.191465 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7a7ba5c49f56245d5f301800daf3e5f4976fdcf6558b6a705ef6b177a1a7214\": container with ID starting with e7a7ba5c49f56245d5f301800daf3e5f4976fdcf6558b6a705ef6b177a1a7214 not found: ID does not exist" containerID="e7a7ba5c49f56245d5f301800daf3e5f4976fdcf6558b6a705ef6b177a1a7214" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.191533 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7a7ba5c49f56245d5f301800daf3e5f4976fdcf6558b6a705ef6b177a1a7214"} err="failed to get container status \"e7a7ba5c49f56245d5f301800daf3e5f4976fdcf6558b6a705ef6b177a1a7214\": rpc error: code = NotFound desc = could not find container \"e7a7ba5c49f56245d5f301800daf3e5f4976fdcf6558b6a705ef6b177a1a7214\": container with ID starting with e7a7ba5c49f56245d5f301800daf3e5f4976fdcf6558b6a705ef6b177a1a7214 not found: ID does not exist" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.219651 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.236677 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.251359 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 13:43:16 crc kubenswrapper[5039]: E1124 13:43:16.251823 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e0d43dd-60a3-4898-b9ef-b2377a357dee" containerName="nova-cell1-novncproxy-novncproxy" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.251851 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e0d43dd-60a3-4898-b9ef-b2377a357dee" containerName="nova-cell1-novncproxy-novncproxy" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.252119 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e0d43dd-60a3-4898-b9ef-b2377a357dee" containerName="nova-cell1-novncproxy-novncproxy" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.252940 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.264974 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.267080 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.267305 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.267452 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.331334 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e0d43dd-60a3-4898-b9ef-b2377a357dee" path="/var/lib/kubelet/pods/8e0d43dd-60a3-4898-b9ef-b2377a357dee/volumes" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.361299 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bcc04e6-8265-45a0-9883-cf6831c72a9c-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"8bcc04e6-8265-45a0-9883-cf6831c72a9c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.361365 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sw9m2\" (UniqueName: \"kubernetes.io/projected/8bcc04e6-8265-45a0-9883-cf6831c72a9c-kube-api-access-sw9m2\") pod \"nova-cell1-novncproxy-0\" (UID: \"8bcc04e6-8265-45a0-9883-cf6831c72a9c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.361547 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bcc04e6-8265-45a0-9883-cf6831c72a9c-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"8bcc04e6-8265-45a0-9883-cf6831c72a9c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.361578 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bcc04e6-8265-45a0-9883-cf6831c72a9c-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"8bcc04e6-8265-45a0-9883-cf6831c72a9c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.361613 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bcc04e6-8265-45a0-9883-cf6831c72a9c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"8bcc04e6-8265-45a0-9883-cf6831c72a9c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.364998 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-wpzfw"] Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.367753 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.386774 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-wpzfw"] Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.463270 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bcc04e6-8265-45a0-9883-cf6831c72a9c-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"8bcc04e6-8265-45a0-9883-cf6831c72a9c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.463325 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bcc04e6-8265-45a0-9883-cf6831c72a9c-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"8bcc04e6-8265-45a0-9883-cf6831c72a9c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.463359 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bcc04e6-8265-45a0-9883-cf6831c72a9c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"8bcc04e6-8265-45a0-9883-cf6831c72a9c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.463433 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-wpzfw\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.463477 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-wpzfw\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.463518 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bcc04e6-8265-45a0-9883-cf6831c72a9c-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"8bcc04e6-8265-45a0-9883-cf6831c72a9c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.463538 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-wpzfw\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.463558 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sw9m2\" (UniqueName: \"kubernetes.io/projected/8bcc04e6-8265-45a0-9883-cf6831c72a9c-kube-api-access-sw9m2\") pod \"nova-cell1-novncproxy-0\" (UID: \"8bcc04e6-8265-45a0-9883-cf6831c72a9c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.463590 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-wpzfw\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.463656 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-config\") pod \"dnsmasq-dns-79b5d74c8c-wpzfw\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.463674 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s44ds\" (UniqueName: \"kubernetes.io/projected/66f89de8-d3eb-4aa2-a537-e4a768c732dd-kube-api-access-s44ds\") pod \"dnsmasq-dns-79b5d74c8c-wpzfw\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.469784 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bcc04e6-8265-45a0-9883-cf6831c72a9c-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"8bcc04e6-8265-45a0-9883-cf6831c72a9c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.473683 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bcc04e6-8265-45a0-9883-cf6831c72a9c-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"8bcc04e6-8265-45a0-9883-cf6831c72a9c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.476075 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bcc04e6-8265-45a0-9883-cf6831c72a9c-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"8bcc04e6-8265-45a0-9883-cf6831c72a9c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.489555 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bcc04e6-8265-45a0-9883-cf6831c72a9c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"8bcc04e6-8265-45a0-9883-cf6831c72a9c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.494069 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sw9m2\" (UniqueName: \"kubernetes.io/projected/8bcc04e6-8265-45a0-9883-cf6831c72a9c-kube-api-access-sw9m2\") pod \"nova-cell1-novncproxy-0\" (UID: \"8bcc04e6-8265-45a0-9883-cf6831c72a9c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.565857 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-wpzfw\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.565928 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-wpzfw\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.565972 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-wpzfw\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.566068 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-config\") pod \"dnsmasq-dns-79b5d74c8c-wpzfw\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.566098 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s44ds\" (UniqueName: \"kubernetes.io/projected/66f89de8-d3eb-4aa2-a537-e4a768c732dd-kube-api-access-s44ds\") pod \"dnsmasq-dns-79b5d74c8c-wpzfw\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.566241 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-wpzfw\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.571424 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-wpzfw\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.572785 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-config\") pod \"dnsmasq-dns-79b5d74c8c-wpzfw\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.573361 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-wpzfw\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.575884 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-wpzfw\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.577465 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-wpzfw\" 
(UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.596018 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.608198 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s44ds\" (UniqueName: \"kubernetes.io/projected/66f89de8-d3eb-4aa2-a537-e4a768c732dd-kube-api-access-s44ds\") pod \"dnsmasq-dns-79b5d74c8c-wpzfw\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:16 crc kubenswrapper[5039]: I1124 13:43:16.693141 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:17 crc kubenswrapper[5039]: I1124 13:43:17.194876 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 13:43:17 crc kubenswrapper[5039]: I1124 13:43:17.335730 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-wpzfw"] Nov 24 13:43:18 crc kubenswrapper[5039]: I1124 13:43:18.203945 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"8bcc04e6-8265-45a0-9883-cf6831c72a9c","Type":"ContainerStarted","Data":"2e6775965cfe8dfc876017fc4bedbd86034c12ad5f53fa7673c2472943d170fd"} Nov 24 13:43:18 crc kubenswrapper[5039]: I1124 13:43:18.208097 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"8bcc04e6-8265-45a0-9883-cf6831c72a9c","Type":"ContainerStarted","Data":"3fffc8b32a0bd2d0d62962e55baadc55fd31c9d2e6b88278372eac9a0fd77bcb"} Nov 24 13:43:18 crc kubenswrapper[5039]: I1124 13:43:18.210188 5039 generic.go:334] "Generic (PLEG): container finished" podID="66f89de8-d3eb-4aa2-a537-e4a768c732dd" containerID="a2cd4de79a44bd132f5eaeed5f34575cdeb6a114ea2f3883903a4682d0ff8820" exitCode=0 Nov 24 13:43:18 crc kubenswrapper[5039]: I1124 13:43:18.210553 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" event={"ID":"66f89de8-d3eb-4aa2-a537-e4a768c732dd","Type":"ContainerDied","Data":"a2cd4de79a44bd132f5eaeed5f34575cdeb6a114ea2f3883903a4682d0ff8820"} Nov 24 13:43:18 crc kubenswrapper[5039]: I1124 13:43:18.210594 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" event={"ID":"66f89de8-d3eb-4aa2-a537-e4a768c732dd","Type":"ContainerStarted","Data":"6305ad22819f32cff50ebd356bd036d794dc062007a4b1a16e19a2e1ec1d1007"} Nov 24 13:43:18 crc kubenswrapper[5039]: I1124 13:43:18.249005 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.248984303 podStartE2EDuration="2.248984303s" podCreationTimestamp="2025-11-24 13:43:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:43:18.232132811 +0000 UTC m=+1510.671257321" watchObservedRunningTime="2025-11-24 13:43:18.248984303 +0000 UTC m=+1510.688108804" Nov 24 13:43:19 crc kubenswrapper[5039]: I1124 13:43:19.214883 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 24 13:43:19 crc kubenswrapper[5039]: I1124 13:43:19.222489 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" 
event={"ID":"66f89de8-d3eb-4aa2-a537-e4a768c732dd","Type":"ContainerStarted","Data":"a2f4ce3a7ab8c05c67eb8ed15d8d542de4d214b41ddc3ae0eda0977119faba80"} Nov 24 13:43:19 crc kubenswrapper[5039]: I1124 13:43:19.222866 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443" containerName="nova-api-log" containerID="cri-o://d3fb66e806b0ddadf9363c04962e0af179907d8f84096395d6cd1b010e804771" gracePeriod=30 Nov 24 13:43:19 crc kubenswrapper[5039]: I1124 13:43:19.222978 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443" containerName="nova-api-api" containerID="cri-o://224cfb9288ec9a52c6d1ef4b70b3fe444c88318831c75bd87a70ff638dca7a54" gracePeriod=30 Nov 24 13:43:19 crc kubenswrapper[5039]: I1124 13:43:19.258452 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" podStartSLOduration=3.258433758 podStartE2EDuration="3.258433758s" podCreationTimestamp="2025-11-24 13:43:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:43:19.251714313 +0000 UTC m=+1511.690838833" watchObservedRunningTime="2025-11-24 13:43:19.258433758 +0000 UTC m=+1511.697558258" Nov 24 13:43:20 crc kubenswrapper[5039]: I1124 13:43:20.101381 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:43:20 crc kubenswrapper[5039]: I1124 13:43:20.101786 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:43:20 crc kubenswrapper[5039]: I1124 13:43:20.101844 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:43:20 crc kubenswrapper[5039]: I1124 13:43:20.102762 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 13:43:20 crc kubenswrapper[5039]: I1124 13:43:20.102835 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" gracePeriod=600 Nov 24 13:43:20 crc kubenswrapper[5039]: E1124 13:43:20.233812 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:43:20 crc kubenswrapper[5039]: I1124 13:43:20.263854 5039 generic.go:334] "Generic (PLEG): container finished" podID="f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443" containerID="d3fb66e806b0ddadf9363c04962e0af179907d8f84096395d6cd1b010e804771" exitCode=143 Nov 24 13:43:20 crc kubenswrapper[5039]: I1124 13:43:20.264121 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443","Type":"ContainerDied","Data":"d3fb66e806b0ddadf9363c04962e0af179907d8f84096395d6cd1b010e804771"} Nov 24 13:43:20 crc kubenswrapper[5039]: I1124 13:43:20.288249 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" exitCode=0 Nov 24 13:43:20 crc kubenswrapper[5039]: I1124 13:43:20.289382 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133"} Nov 24 13:43:20 crc kubenswrapper[5039]: I1124 13:43:20.289475 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:20 crc kubenswrapper[5039]: I1124 13:43:20.289574 5039 scope.go:117] "RemoveContainer" containerID="f993c951919012dcf982065d331337a1627947abef22ad885fe48114cf5620d5" Nov 24 13:43:20 crc kubenswrapper[5039]: I1124 13:43:20.290240 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:43:20 crc kubenswrapper[5039]: E1124 13:43:20.290544 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:43:20 crc kubenswrapper[5039]: I1124 13:43:20.782849 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:43:20 crc kubenswrapper[5039]: I1124 13:43:20.783331 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3e57550d-0004-4f59-882e-557913349848" containerName="ceilometer-central-agent" containerID="cri-o://6cab88ab1dd4d6b5b29438fcfa20b3f8bc01e77ed5bce0f58d8e1e900e136d70" gracePeriod=30 Nov 24 13:43:20 crc kubenswrapper[5039]: I1124 13:43:20.783519 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3e57550d-0004-4f59-882e-557913349848" containerName="proxy-httpd" containerID="cri-o://c8e6ca3373445b885495c93b8a540a08075131dd92cd669c4eafd8d93e3f329a" gracePeriod=30 Nov 24 13:43:20 crc kubenswrapper[5039]: I1124 13:43:20.783593 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3e57550d-0004-4f59-882e-557913349848" containerName="sg-core" 
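
"Killing container with a grace period" means the runtime sends the container SIGTERM and escalates to SIGKILL only if it is still running when the grace period (30s for the nova-api containers, 600s for machine-config-daemon) expires; the exitCode=143 recorded for nova-api-log is 128+15, i.e. death by SIGTERM. The CrashLoopBackOff error is the other half of restart handling: restarts are delayed with an exponential back-off, capped here at 5m0s. A sketch of the same two-step stop at the plain-process level, assuming an already-started exec.Cmd rather than a CRI runtime:

    package main

    import (
        "os/exec"
        "syscall"
        "time"
    )

    // stopWithGrace asks the process to exit with SIGTERM, then falls
    // back to SIGKILL if it is still alive after the grace period. A
    // process killed by the SIGTERM exits with status 128+15 = 143.
    func stopWithGrace(cmd *exec.Cmd, grace time.Duration) error {
        if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
            return err
        }
        done := make(chan error, 1)
        go func() { done <- cmd.Wait() }()
        select {
        case err := <-done:
            return err // exited within the grace period
        case <-time.After(grace):
            _ = cmd.Process.Kill() // grace expired: SIGKILL
            return <-done
        }
    }

    func main() {
        cmd := exec.Command("sleep", "300")
        _ = cmd.Start()
        _ = stopWithGrace(cmd, 30*time.Second)
    }
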
containerID="cri-o://14a23fd2b0668280c9cff5bff206b8f2064163145c8d6c72da534b54843c8513" gracePeriod=30 Nov 24 13:43:20 crc kubenswrapper[5039]: I1124 13:43:20.783724 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3e57550d-0004-4f59-882e-557913349848" containerName="ceilometer-notification-agent" containerID="cri-o://f9d3f3b2392c27e3e34fb8adadf1e5bf2ac1c29f088fcee9fc688fba80c682da" gracePeriod=30 Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.306693 5039 generic.go:334] "Generic (PLEG): container finished" podID="3e57550d-0004-4f59-882e-557913349848" containerID="c8e6ca3373445b885495c93b8a540a08075131dd92cd669c4eafd8d93e3f329a" exitCode=0 Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.306724 5039 generic.go:334] "Generic (PLEG): container finished" podID="3e57550d-0004-4f59-882e-557913349848" containerID="14a23fd2b0668280c9cff5bff206b8f2064163145c8d6c72da534b54843c8513" exitCode=2 Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.306731 5039 generic.go:334] "Generic (PLEG): container finished" podID="3e57550d-0004-4f59-882e-557913349848" containerID="f9d3f3b2392c27e3e34fb8adadf1e5bf2ac1c29f088fcee9fc688fba80c682da" exitCode=0 Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.306737 5039 generic.go:334] "Generic (PLEG): container finished" podID="3e57550d-0004-4f59-882e-557913349848" containerID="6cab88ab1dd4d6b5b29438fcfa20b3f8bc01e77ed5bce0f58d8e1e900e136d70" exitCode=0 Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.306785 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3e57550d-0004-4f59-882e-557913349848","Type":"ContainerDied","Data":"c8e6ca3373445b885495c93b8a540a08075131dd92cd669c4eafd8d93e3f329a"} Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.306833 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3e57550d-0004-4f59-882e-557913349848","Type":"ContainerDied","Data":"14a23fd2b0668280c9cff5bff206b8f2064163145c8d6c72da534b54843c8513"} Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.306847 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3e57550d-0004-4f59-882e-557913349848","Type":"ContainerDied","Data":"f9d3f3b2392c27e3e34fb8adadf1e5bf2ac1c29f088fcee9fc688fba80c682da"} Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.306862 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3e57550d-0004-4f59-882e-557913349848","Type":"ContainerDied","Data":"6cab88ab1dd4d6b5b29438fcfa20b3f8bc01e77ed5bce0f58d8e1e900e136d70"} Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.596955 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.687605 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.795273 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-ceilometer-tls-certs\") pod \"3e57550d-0004-4f59-882e-557913349848\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.795336 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3e57550d-0004-4f59-882e-557913349848-run-httpd\") pod \"3e57550d-0004-4f59-882e-557913349848\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.795405 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-sg-core-conf-yaml\") pod \"3e57550d-0004-4f59-882e-557913349848\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.795562 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-config-data\") pod \"3e57550d-0004-4f59-882e-557913349848\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.795643 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qcxmt\" (UniqueName: \"kubernetes.io/projected/3e57550d-0004-4f59-882e-557913349848-kube-api-access-qcxmt\") pod \"3e57550d-0004-4f59-882e-557913349848\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.795675 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-scripts\") pod \"3e57550d-0004-4f59-882e-557913349848\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.795755 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-combined-ca-bundle\") pod \"3e57550d-0004-4f59-882e-557913349848\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.795773 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3e57550d-0004-4f59-882e-557913349848-log-httpd\") pod \"3e57550d-0004-4f59-882e-557913349848\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.796613 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3e57550d-0004-4f59-882e-557913349848-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3e57550d-0004-4f59-882e-557913349848" (UID: "3e57550d-0004-4f59-882e-557913349848"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.796920 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3e57550d-0004-4f59-882e-557913349848-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3e57550d-0004-4f59-882e-557913349848" (UID: "3e57550d-0004-4f59-882e-557913349848"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.802682 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e57550d-0004-4f59-882e-557913349848-kube-api-access-qcxmt" (OuterVolumeSpecName: "kube-api-access-qcxmt") pod "3e57550d-0004-4f59-882e-557913349848" (UID: "3e57550d-0004-4f59-882e-557913349848"). InnerVolumeSpecName "kube-api-access-qcxmt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.802806 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-scripts" (OuterVolumeSpecName: "scripts") pod "3e57550d-0004-4f59-882e-557913349848" (UID: "3e57550d-0004-4f59-882e-557913349848"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.826197 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3e57550d-0004-4f59-882e-557913349848" (UID: "3e57550d-0004-4f59-882e-557913349848"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.859006 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "3e57550d-0004-4f59-882e-557913349848" (UID: "3e57550d-0004-4f59-882e-557913349848"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.899946 5039 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.899984 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qcxmt\" (UniqueName: \"kubernetes.io/projected/3e57550d-0004-4f59-882e-557913349848-kube-api-access-qcxmt\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.899994 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.900005 5039 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3e57550d-0004-4f59-882e-557913349848-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.900016 5039 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.900024 5039 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3e57550d-0004-4f59-882e-557913349848-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:21 crc kubenswrapper[5039]: E1124 13:43:21.904975 5039 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-config-data podName:3e57550d-0004-4f59-882e-557913349848 nodeName:}" failed. No retries permitted until 2025-11-24 13:43:22.404946263 +0000 UTC m=+1514.844070753 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "config-data" (UniqueName: "kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-config-data") pod "3e57550d-0004-4f59-882e-557913349848" (UID: "3e57550d-0004-4f59-882e-557913349848") : error deleting /var/lib/kubelet/pods/3e57550d-0004-4f59-882e-557913349848/volume-subpaths: remove /var/lib/kubelet/pods/3e57550d-0004-4f59-882e-557913349848/volume-subpaths: no such file or directory Nov 24 13:43:21 crc kubenswrapper[5039]: I1124 13:43:21.908051 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3e57550d-0004-4f59-882e-557913349848" (UID: "3e57550d-0004-4f59-882e-557913349848"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.001795 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.339357 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3e57550d-0004-4f59-882e-557913349848","Type":"ContainerDied","Data":"c29cf48c0649be699f3564de352af89816933499016e10bc574d5cc98d711de2"} Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.339420 5039 scope.go:117] "RemoveContainer" containerID="c8e6ca3373445b885495c93b8a540a08075131dd92cd669c4eafd8d93e3f329a" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.339453 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.365571 5039 scope.go:117] "RemoveContainer" containerID="14a23fd2b0668280c9cff5bff206b8f2064163145c8d6c72da534b54843c8513" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.386070 5039 scope.go:117] "RemoveContainer" containerID="f9d3f3b2392c27e3e34fb8adadf1e5bf2ac1c29f088fcee9fc688fba80c682da" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.412517 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-config-data\") pod \"3e57550d-0004-4f59-882e-557913349848\" (UID: \"3e57550d-0004-4f59-882e-557913349848\") " Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.415848 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-config-data" (OuterVolumeSpecName: "config-data") pod "3e57550d-0004-4f59-882e-557913349848" (UID: "3e57550d-0004-4f59-882e-557913349848"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.515481 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e57550d-0004-4f59-882e-557913349848-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.542911 5039 scope.go:117] "RemoveContainer" containerID="6cab88ab1dd4d6b5b29438fcfa20b3f8bc01e77ed5bce0f58d8e1e900e136d70" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.690399 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.703928 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.717631 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:43:22 crc kubenswrapper[5039]: E1124 13:43:22.718379 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e57550d-0004-4f59-882e-557913349848" containerName="proxy-httpd" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.718392 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e57550d-0004-4f59-882e-557913349848" containerName="proxy-httpd" Nov 24 13:43:22 crc kubenswrapper[5039]: E1124 13:43:22.718425 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e57550d-0004-4f59-882e-557913349848" containerName="ceilometer-notification-agent" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.718432 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e57550d-0004-4f59-882e-557913349848" containerName="ceilometer-notification-agent" Nov 24 13:43:22 crc kubenswrapper[5039]: E1124 13:43:22.718454 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e57550d-0004-4f59-882e-557913349848" containerName="sg-core" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.718460 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e57550d-0004-4f59-882e-557913349848" containerName="sg-core" Nov 24 13:43:22 crc kubenswrapper[5039]: E1124 13:43:22.718476 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e57550d-0004-4f59-882e-557913349848" containerName="ceilometer-central-agent" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.718482 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e57550d-0004-4f59-882e-557913349848" containerName="ceilometer-central-agent" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.718689 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e57550d-0004-4f59-882e-557913349848" containerName="ceilometer-central-agent" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.718704 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e57550d-0004-4f59-882e-557913349848" containerName="sg-core" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.718718 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e57550d-0004-4f59-882e-557913349848" containerName="proxy-httpd" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.718732 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e57550d-0004-4f59-882e-557913349848" containerName="ceilometer-notification-agent" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.725775 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.726571 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.732028 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.732368 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.732551 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.821728 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.821796 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-scripts\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.821813 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-config-data\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.821863 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.821965 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rdhq\" (UniqueName: \"kubernetes.io/projected/523ac154-3c03-4930-8729-874c0e056d14-kube-api-access-7rdhq\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.821984 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/523ac154-3c03-4930-8729-874c0e056d14-log-httpd\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.822043 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/523ac154-3c03-4930-8729-874c0e056d14-run-httpd\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.822068 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.844561 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.928171 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-config-data\") pod \"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443\" (UID: \"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443\") " Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.928387 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-logs\") pod \"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443\" (UID: \"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443\") " Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.928464 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-combined-ca-bundle\") pod \"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443\" (UID: \"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443\") " Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.928580 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcwbn\" (UniqueName: \"kubernetes.io/projected/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-kube-api-access-fcwbn\") pod \"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443\" (UID: \"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443\") " Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.928999 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-logs" (OuterVolumeSpecName: "logs") pod "f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443" (UID: "f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.929550 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rdhq\" (UniqueName: \"kubernetes.io/projected/523ac154-3c03-4930-8729-874c0e056d14-kube-api-access-7rdhq\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.929588 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/523ac154-3c03-4930-8729-874c0e056d14-log-httpd\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.929671 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/523ac154-3c03-4930-8729-874c0e056d14-run-httpd\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.929705 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.929753 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.929792 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-scripts\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.929815 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-config-data\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.929877 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.930015 5039 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-logs\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.932898 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/523ac154-3c03-4930-8729-874c0e056d14-log-httpd\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.933302 5039 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-kube-api-access-fcwbn" (OuterVolumeSpecName: "kube-api-access-fcwbn") pod "f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443" (UID: "f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443"). InnerVolumeSpecName "kube-api-access-fcwbn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.933476 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/523ac154-3c03-4930-8729-874c0e056d14-run-httpd\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.940273 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-scripts\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.940610 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.942289 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.947231 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-config-data\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.947435 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.969674 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rdhq\" (UniqueName: \"kubernetes.io/projected/523ac154-3c03-4930-8729-874c0e056d14-kube-api-access-7rdhq\") pod \"ceilometer-0\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " pod="openstack/ceilometer-0" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.984196 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-config-data" (OuterVolumeSpecName: "config-data") pod "f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443" (UID: "f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.988375 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.989225 5039 util.go:30] "No sandbox for pod can be found. 
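
Volumes named kube-api-access-* above (fcwbn, 7rdhq, s44ds, ...) are the projected service-account volumes the kubelet builds for every pod: a projection of the pod's service-account token, the cluster CA bundle, and the namespace, mounted inside the container at a well-known path. A sketch of what a process in the pod would read from that mount:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // The kube-api-access-* projected volume appears inside the
    // container at this well-known path.
    const saDir = "/var/run/secrets/kubernetes.io/serviceaccount"

    func main() {
        for _, name := range []string{"token", "ca.crt", "namespace"} {
            data, err := os.ReadFile(filepath.Join(saDir, name))
            if err != nil {
                fmt.Println("missing:", name, err) // e.g. when run outside a pod
                continue
            }
            fmt.Printf("%s: %d bytes\n", name, len(data))
        }
    }
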
Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.989225 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 24 13:43:22 crc kubenswrapper[5039]: I1124 13:43:22.992021 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443" (UID: "f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.032764 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.032807 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcwbn\" (UniqueName: \"kubernetes.io/projected/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-kube-api-access-fcwbn\") on node \"crc\" DevicePath \"\""
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.032821 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.415020 5039 generic.go:334] "Generic (PLEG): container finished" podID="f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443" containerID="224cfb9288ec9a52c6d1ef4b70b3fe444c88318831c75bd87a70ff638dca7a54" exitCode=0
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.415102 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.415124 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443","Type":"ContainerDied","Data":"224cfb9288ec9a52c6d1ef4b70b3fe444c88318831c75bd87a70ff638dca7a54"}
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.415620 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443","Type":"ContainerDied","Data":"f26ee9d9a1b9d2100d6527528a4c5e6e0e28cd8b94e88143ad37749aa4ad9758"}
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.415650 5039 scope.go:117] "RemoveContainer" containerID="224cfb9288ec9a52c6d1ef4b70b3fe444c88318831c75bd87a70ff638dca7a54"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.466176 5039 scope.go:117] "RemoveContainer" containerID="d3fb66e806b0ddadf9363c04962e0af179907d8f84096395d6cd1b010e804771"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.470580 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.489872 5039 scope.go:117] "RemoveContainer" containerID="224cfb9288ec9a52c6d1ef4b70b3fe444c88318831c75bd87a70ff638dca7a54"
Nov 24 13:43:23 crc kubenswrapper[5039]: E1124 13:43:23.490339 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"224cfb9288ec9a52c6d1ef4b70b3fe444c88318831c75bd87a70ff638dca7a54\": container with ID starting with 224cfb9288ec9a52c6d1ef4b70b3fe444c88318831c75bd87a70ff638dca7a54 not found: ID does not exist" containerID="224cfb9288ec9a52c6d1ef4b70b3fe444c88318831c75bd87a70ff638dca7a54"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.490382 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"224cfb9288ec9a52c6d1ef4b70b3fe444c88318831c75bd87a70ff638dca7a54"} err="failed to get container status \"224cfb9288ec9a52c6d1ef4b70b3fe444c88318831c75bd87a70ff638dca7a54\": rpc error: code = NotFound desc = could not find container \"224cfb9288ec9a52c6d1ef4b70b3fe444c88318831c75bd87a70ff638dca7a54\": container with ID starting with 224cfb9288ec9a52c6d1ef4b70b3fe444c88318831c75bd87a70ff638dca7a54 not found: ID does not exist"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.490412 5039 scope.go:117] "RemoveContainer" containerID="d3fb66e806b0ddadf9363c04962e0af179907d8f84096395d6cd1b010e804771"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.492454 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 24 13:43:23 crc kubenswrapper[5039]: E1124 13:43:23.493927 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3fb66e806b0ddadf9363c04962e0af179907d8f84096395d6cd1b010e804771\": container with ID starting with d3fb66e806b0ddadf9363c04962e0af179907d8f84096395d6cd1b010e804771 not found: ID does not exist" containerID="d3fb66e806b0ddadf9363c04962e0af179907d8f84096395d6cd1b010e804771"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.493957 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3fb66e806b0ddadf9363c04962e0af179907d8f84096395d6cd1b010e804771"} err="failed to get container status \"d3fb66e806b0ddadf9363c04962e0af179907d8f84096395d6cd1b010e804771\": rpc error: code = NotFound desc = could not find container \"d3fb66e806b0ddadf9363c04962e0af179907d8f84096395d6cd1b010e804771\": container with ID starting with d3fb66e806b0ddadf9363c04962e0af179907d8f84096395d6cd1b010e804771 not found: ID does not exist"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.520297 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 24 13:43:23 crc kubenswrapper[5039]: E1124 13:43:23.520845 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443" containerName="nova-api-log"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.520869 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443" containerName="nova-api-log"
Nov 24 13:43:23 crc kubenswrapper[5039]: E1124 13:43:23.520894 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443" containerName="nova-api-api"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.520902 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443" containerName="nova-api-api"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.521162 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443" containerName="nova-api-log"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.521184 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443" containerName="nova-api-api"
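
The "ContainerStatus from runtime service failed ... NotFound" / "DeleteContainer returned error" pairs above are the benign tail of pod deletion: by the time the deletor asks the runtime about nova-api-0's old containers they are already gone, so the error is logged and the removal is treated as complete. That is the usual idempotent-delete pattern, sketched below with a stand-in sentinel for the gRPC NotFound error:

    package main

    import (
        "errors"
        "fmt"
    )

    // errNotFound stands in for the runtime's "code = NotFound" error.
    var errNotFound = errors.New("container not found")

    // removeContainer is an idempotent delete: NotFound means the
    // desired state (container gone) already holds, so it is logged
    // and swallowed rather than propagated as a failure.
    func removeContainer(id string, runtimeRemove func(string) error) error {
        if err := runtimeRemove(id); err != nil {
            if errors.Is(err, errNotFound) {
                fmt.Printf("DeleteContainer returned error for %s: %v (already deleted)\n", id, err)
                return nil
            }
            return err
        }
        return nil
    }

    func main() {
        _ = removeContainer("224cfb92", func(string) error { return errNotFound })
    }
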
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.522361 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.527840 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.527946 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.528028 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.544538 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.570913 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.656403 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " pod="openstack/nova-api-0"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.656554 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-public-tls-certs\") pod \"nova-api-0\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " pod="openstack/nova-api-0"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.656673 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-config-data\") pod \"nova-api-0\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " pod="openstack/nova-api-0"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.656872 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/770f9659-47d8-4af2-bc52-18fa13b7d10e-logs\") pod \"nova-api-0\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " pod="openstack/nova-api-0"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.657115 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-internal-tls-certs\") pod \"nova-api-0\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " pod="openstack/nova-api-0"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.657399 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zbdp\" (UniqueName: \"kubernetes.io/projected/770f9659-47d8-4af2-bc52-18fa13b7d10e-kube-api-access-4zbdp\") pod \"nova-api-0\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " pod="openstack/nova-api-0"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.759715 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/770f9659-47d8-4af2-bc52-18fa13b7d10e-logs\") pod \"nova-api-0\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " pod="openstack/nova-api-0"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.759800 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-internal-tls-certs\") pod \"nova-api-0\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " pod="openstack/nova-api-0"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.759865 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zbdp\" (UniqueName: \"kubernetes.io/projected/770f9659-47d8-4af2-bc52-18fa13b7d10e-kube-api-access-4zbdp\") pod \"nova-api-0\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " pod="openstack/nova-api-0"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.759918 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " pod="openstack/nova-api-0"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.759947 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-public-tls-certs\") pod \"nova-api-0\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " pod="openstack/nova-api-0"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.760066 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-config-data\") pod \"nova-api-0\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " pod="openstack/nova-api-0"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.760140 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/770f9659-47d8-4af2-bc52-18fa13b7d10e-logs\") pod \"nova-api-0\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " pod="openstack/nova-api-0"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.766930 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " pod="openstack/nova-api-0"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.767320 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-public-tls-certs\") pod \"nova-api-0\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " pod="openstack/nova-api-0"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.767646 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-config-data\") pod \"nova-api-0\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " pod="openstack/nova-api-0"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.772181 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-internal-tls-certs\") pod \"nova-api-0\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " pod="openstack/nova-api-0"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.778942 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zbdp\" (UniqueName: \"kubernetes.io/projected/770f9659-47d8-4af2-bc52-18fa13b7d10e-kube-api-access-4zbdp\") pod \"nova-api-0\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " pod="openstack/nova-api-0"
Nov 24 13:43:23 crc kubenswrapper[5039]: I1124 13:43:23.886962 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 24 13:43:24 crc kubenswrapper[5039]: I1124 13:43:24.324166 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e57550d-0004-4f59-882e-557913349848" path="/var/lib/kubelet/pods/3e57550d-0004-4f59-882e-557913349848/volumes"
Nov 24 13:43:24 crc kubenswrapper[5039]: I1124 13:43:24.326392 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443" path="/var/lib/kubelet/pods/f5f10fd8-c9e2-4da0-ba9d-e9bde2e75443/volumes"
Nov 24 13:43:24 crc kubenswrapper[5039]: I1124 13:43:24.439381 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"523ac154-3c03-4930-8729-874c0e056d14","Type":"ContainerStarted","Data":"fdf1012c4f06085ededc4eb3766a497da190f9093412099b6e1481573650aa3b"}
Nov 24 13:43:24 crc kubenswrapper[5039]: I1124 13:43:24.439425 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"523ac154-3c03-4930-8729-874c0e056d14","Type":"ContainerStarted","Data":"2ea30297207c57e95d688da7e755067f5bad57f51cedde36d24036deb7b19e92"}
Nov 24 13:43:24 crc kubenswrapper[5039]: I1124 13:43:24.463003 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 24 13:43:25 crc kubenswrapper[5039]: I1124 13:43:25.467205 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"523ac154-3c03-4930-8729-874c0e056d14","Type":"ContainerStarted","Data":"2153ded787a942c651b7fe4c4920cee10e596508ee3e9c64ea97a21e6ed4ba5e"}
Nov 24 13:43:25 crc kubenswrapper[5039]: I1124 13:43:25.469783 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"770f9659-47d8-4af2-bc52-18fa13b7d10e","Type":"ContainerStarted","Data":"e31fa4c95041a53a027f913246669668ea738a07cd4fb3a7b164e47c0b054f2d"}
Nov 24 13:43:25 crc kubenswrapper[5039]: I1124 13:43:25.469816 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"770f9659-47d8-4af2-bc52-18fa13b7d10e","Type":"ContainerStarted","Data":"9668d8a8c6a8cc19fecb760b3a85686c868a613f96807f5b685bcc2aefe29046"}
Nov 24 13:43:25 crc kubenswrapper[5039]: I1124 13:43:25.469829 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"770f9659-47d8-4af2-bc52-18fa13b7d10e","Type":"ContainerStarted","Data":"65cba305f72f81f1cc0940694b179bc6f4cb2ce829b790c23a85a54e62a07865"}
Nov 24 13:43:25 crc kubenswrapper[5039]: I1124 13:43:25.501975 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.501953592 podStartE2EDuration="2.501953592s" podCreationTimestamp="2025-11-24 13:43:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:43:25.496889808 +0000 UTC m=+1517.936014308" watchObservedRunningTime="2025-11-24 13:43:25.501953592 +0000 UTC m=+1517.941078092"
event={"ID":"523ac154-3c03-4930-8729-874c0e056d14","Type":"ContainerStarted","Data":"5976dd6cd4254023640ff24c7c1652c2245f6ef018a78fdf12ec71bfdb4263af"} Nov 24 13:43:26 crc kubenswrapper[5039]: I1124 13:43:26.597930 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:26 crc kubenswrapper[5039]: I1124 13:43:26.617186 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:26 crc kubenswrapper[5039]: I1124 13:43:26.695527 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:43:26 crc kubenswrapper[5039]: I1124 13:43:26.772898 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-tztwk"] Nov 24 13:43:26 crc kubenswrapper[5039]: I1124 13:43:26.773125 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" podUID="2b7e58ca-126f-4175-8e81-8311a1de04b4" containerName="dnsmasq-dns" containerID="cri-o://5f8fdd7de7be0c4c0d3edbfc3db5dfdac02d2a635b401e8ecfe235dd8f121017" gracePeriod=10 Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.357689 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.446026 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-dns-swift-storage-0\") pod \"2b7e58ca-126f-4175-8e81-8311a1de04b4\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.446129 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-ovsdbserver-nb\") pod \"2b7e58ca-126f-4175-8e81-8311a1de04b4\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.446187 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-dns-svc\") pod \"2b7e58ca-126f-4175-8e81-8311a1de04b4\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.446254 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-ovsdbserver-sb\") pod \"2b7e58ca-126f-4175-8e81-8311a1de04b4\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.446376 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-config\") pod \"2b7e58ca-126f-4175-8e81-8311a1de04b4\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.446430 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnf6s\" (UniqueName: \"kubernetes.io/projected/2b7e58ca-126f-4175-8e81-8311a1de04b4-kube-api-access-mnf6s\") pod \"2b7e58ca-126f-4175-8e81-8311a1de04b4\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " Nov 24 13:43:27 crc 
kubenswrapper[5039]: I1124 13:43:27.452720 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b7e58ca-126f-4175-8e81-8311a1de04b4-kube-api-access-mnf6s" (OuterVolumeSpecName: "kube-api-access-mnf6s") pod "2b7e58ca-126f-4175-8e81-8311a1de04b4" (UID: "2b7e58ca-126f-4175-8e81-8311a1de04b4"). InnerVolumeSpecName "kube-api-access-mnf6s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.511533 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"523ac154-3c03-4930-8729-874c0e056d14","Type":"ContainerStarted","Data":"04d55bc3cb1f9257bfa7fd3ade3173850a184e5c3f113e6f4b7cfbfdc062e01f"} Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.512027 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="523ac154-3c03-4930-8729-874c0e056d14" containerName="ceilometer-central-agent" containerID="cri-o://fdf1012c4f06085ededc4eb3766a497da190f9093412099b6e1481573650aa3b" gracePeriod=30 Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.512156 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.512713 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="523ac154-3c03-4930-8729-874c0e056d14" containerName="proxy-httpd" containerID="cri-o://04d55bc3cb1f9257bfa7fd3ade3173850a184e5c3f113e6f4b7cfbfdc062e01f" gracePeriod=30 Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.512850 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="523ac154-3c03-4930-8729-874c0e056d14" containerName="sg-core" containerID="cri-o://5976dd6cd4254023640ff24c7c1652c2245f6ef018a78fdf12ec71bfdb4263af" gracePeriod=30 Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.512888 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="523ac154-3c03-4930-8729-874c0e056d14" containerName="ceilometer-notification-agent" containerID="cri-o://2153ded787a942c651b7fe4c4920cee10e596508ee3e9c64ea97a21e6ed4ba5e" gracePeriod=30 Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.513851 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-config" (OuterVolumeSpecName: "config") pod "2b7e58ca-126f-4175-8e81-8311a1de04b4" (UID: "2b7e58ca-126f-4175-8e81-8311a1de04b4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.523851 5039 generic.go:334] "Generic (PLEG): container finished" podID="2b7e58ca-126f-4175-8e81-8311a1de04b4" containerID="5f8fdd7de7be0c4c0d3edbfc3db5dfdac02d2a635b401e8ecfe235dd8f121017" exitCode=0 Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.524588 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" event={"ID":"2b7e58ca-126f-4175-8e81-8311a1de04b4","Type":"ContainerDied","Data":"5f8fdd7de7be0c4c0d3edbfc3db5dfdac02d2a635b401e8ecfe235dd8f121017"} Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.524633 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.524658 5039 scope.go:117] "RemoveContainer" containerID="5f8fdd7de7be0c4c0d3edbfc3db5dfdac02d2a635b401e8ecfe235dd8f121017" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.524643 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-tztwk" event={"ID":"2b7e58ca-126f-4175-8e81-8311a1de04b4","Type":"ContainerDied","Data":"f9abc6cd4e5fe2cdb72b5744c49e753a52629d9d0366cf8caecccb5cae55d997"} Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.527547 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2b7e58ca-126f-4175-8e81-8311a1de04b4" (UID: "2b7e58ca-126f-4175-8e81-8311a1de04b4"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.542935 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2b7e58ca-126f-4175-8e81-8311a1de04b4" (UID: "2b7e58ca-126f-4175-8e81-8311a1de04b4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.547142 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.242814122 podStartE2EDuration="5.547124613s" podCreationTimestamp="2025-11-24 13:43:22 +0000 UTC" firstStartedPulling="2025-11-24 13:43:23.557054793 +0000 UTC m=+1515.996179293" lastFinishedPulling="2025-11-24 13:43:26.861365284 +0000 UTC m=+1519.300489784" observedRunningTime="2025-11-24 13:43:27.540415769 +0000 UTC m=+1519.979540269" watchObservedRunningTime="2025-11-24 13:43:27.547124613 +0000 UTC m=+1519.986249133" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.547942 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2b7e58ca-126f-4175-8e81-8311a1de04b4" (UID: "2b7e58ca-126f-4175-8e81-8311a1de04b4"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.548054 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-ovsdbserver-sb\") pod \"2b7e58ca-126f-4175-8e81-8311a1de04b4\" (UID: \"2b7e58ca-126f-4175-8e81-8311a1de04b4\") " Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.548661 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.548682 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnf6s\" (UniqueName: \"kubernetes.io/projected/2b7e58ca-126f-4175-8e81-8311a1de04b4-kube-api-access-mnf6s\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.548694 5039 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.548702 5039 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:27 crc kubenswrapper[5039]: W1124 13:43:27.548773 5039 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/2b7e58ca-126f-4175-8e81-8311a1de04b4/volumes/kubernetes.io~configmap/ovsdbserver-sb Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.548783 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2b7e58ca-126f-4175-8e81-8311a1de04b4" (UID: "2b7e58ca-126f-4175-8e81-8311a1de04b4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.555064 5039 scope.go:117] "RemoveContainer" containerID="304b4dbf35849b99ad645a5fee388de877d819e3c2a42e78628f36290ca68d1d" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.555803 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2b7e58ca-126f-4175-8e81-8311a1de04b4" (UID: "2b7e58ca-126f-4175-8e81-8311a1de04b4"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.562221 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.581668 5039 scope.go:117] "RemoveContainer" containerID="5f8fdd7de7be0c4c0d3edbfc3db5dfdac02d2a635b401e8ecfe235dd8f121017" Nov 24 13:43:27 crc kubenswrapper[5039]: E1124 13:43:27.582080 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f8fdd7de7be0c4c0d3edbfc3db5dfdac02d2a635b401e8ecfe235dd8f121017\": container with ID starting with 5f8fdd7de7be0c4c0d3edbfc3db5dfdac02d2a635b401e8ecfe235dd8f121017 not found: ID does not exist" containerID="5f8fdd7de7be0c4c0d3edbfc3db5dfdac02d2a635b401e8ecfe235dd8f121017" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.582127 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f8fdd7de7be0c4c0d3edbfc3db5dfdac02d2a635b401e8ecfe235dd8f121017"} err="failed to get container status \"5f8fdd7de7be0c4c0d3edbfc3db5dfdac02d2a635b401e8ecfe235dd8f121017\": rpc error: code = NotFound desc = could not find container \"5f8fdd7de7be0c4c0d3edbfc3db5dfdac02d2a635b401e8ecfe235dd8f121017\": container with ID starting with 5f8fdd7de7be0c4c0d3edbfc3db5dfdac02d2a635b401e8ecfe235dd8f121017 not found: ID does not exist" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.582157 5039 scope.go:117] "RemoveContainer" containerID="304b4dbf35849b99ad645a5fee388de877d819e3c2a42e78628f36290ca68d1d" Nov 24 13:43:27 crc kubenswrapper[5039]: E1124 13:43:27.585683 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"304b4dbf35849b99ad645a5fee388de877d819e3c2a42e78628f36290ca68d1d\": container with ID starting with 304b4dbf35849b99ad645a5fee388de877d819e3c2a42e78628f36290ca68d1d not found: ID does not exist" containerID="304b4dbf35849b99ad645a5fee388de877d819e3c2a42e78628f36290ca68d1d" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.585818 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"304b4dbf35849b99ad645a5fee388de877d819e3c2a42e78628f36290ca68d1d"} err="failed to get container status \"304b4dbf35849b99ad645a5fee388de877d819e3c2a42e78628f36290ca68d1d\": rpc error: code = NotFound desc = could not find container \"304b4dbf35849b99ad645a5fee388de877d819e3c2a42e78628f36290ca68d1d\": container with ID starting with 304b4dbf35849b99ad645a5fee388de877d819e3c2a42e78628f36290ca68d1d not found: ID does not exist" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.650835 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.650863 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b7e58ca-126f-4175-8e81-8311a1de04b4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.799235 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-4vbpl"] Nov 24 13:43:27 crc kubenswrapper[5039]: E1124 13:43:27.799861 5039 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="2b7e58ca-126f-4175-8e81-8311a1de04b4" containerName="init" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.799885 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b7e58ca-126f-4175-8e81-8311a1de04b4" containerName="init" Nov 24 13:43:27 crc kubenswrapper[5039]: E1124 13:43:27.799921 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b7e58ca-126f-4175-8e81-8311a1de04b4" containerName="dnsmasq-dns" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.799929 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b7e58ca-126f-4175-8e81-8311a1de04b4" containerName="dnsmasq-dns" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.800181 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b7e58ca-126f-4175-8e81-8311a1de04b4" containerName="dnsmasq-dns" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.801159 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-4vbpl" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.805074 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.805082 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.822253 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-4vbpl"] Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.876109 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-tztwk"] Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.885754 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-tztwk"] Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.957295 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a528dd72-29d9-43c4-8541-f6e416144724-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-4vbpl\" (UID: \"a528dd72-29d9-43c4-8541-f6e416144724\") " pod="openstack/nova-cell1-cell-mapping-4vbpl" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.957337 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgr46\" (UniqueName: \"kubernetes.io/projected/a528dd72-29d9-43c4-8541-f6e416144724-kube-api-access-vgr46\") pod \"nova-cell1-cell-mapping-4vbpl\" (UID: \"a528dd72-29d9-43c4-8541-f6e416144724\") " pod="openstack/nova-cell1-cell-mapping-4vbpl" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.957365 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a528dd72-29d9-43c4-8541-f6e416144724-config-data\") pod \"nova-cell1-cell-mapping-4vbpl\" (UID: \"a528dd72-29d9-43c4-8541-f6e416144724\") " pod="openstack/nova-cell1-cell-mapping-4vbpl" Nov 24 13:43:27 crc kubenswrapper[5039]: I1124 13:43:27.957881 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a528dd72-29d9-43c4-8541-f6e416144724-scripts\") pod \"nova-cell1-cell-mapping-4vbpl\" (UID: \"a528dd72-29d9-43c4-8541-f6e416144724\") " pod="openstack/nova-cell1-cell-mapping-4vbpl" Nov 24 13:43:28 crc kubenswrapper[5039]: I1124 13:43:28.059802 
5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a528dd72-29d9-43c4-8541-f6e416144724-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-4vbpl\" (UID: \"a528dd72-29d9-43c4-8541-f6e416144724\") " pod="openstack/nova-cell1-cell-mapping-4vbpl" Nov 24 13:43:28 crc kubenswrapper[5039]: I1124 13:43:28.059853 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgr46\" (UniqueName: \"kubernetes.io/projected/a528dd72-29d9-43c4-8541-f6e416144724-kube-api-access-vgr46\") pod \"nova-cell1-cell-mapping-4vbpl\" (UID: \"a528dd72-29d9-43c4-8541-f6e416144724\") " pod="openstack/nova-cell1-cell-mapping-4vbpl" Nov 24 13:43:28 crc kubenswrapper[5039]: I1124 13:43:28.059874 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a528dd72-29d9-43c4-8541-f6e416144724-config-data\") pod \"nova-cell1-cell-mapping-4vbpl\" (UID: \"a528dd72-29d9-43c4-8541-f6e416144724\") " pod="openstack/nova-cell1-cell-mapping-4vbpl" Nov 24 13:43:28 crc kubenswrapper[5039]: I1124 13:43:28.060013 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a528dd72-29d9-43c4-8541-f6e416144724-scripts\") pod \"nova-cell1-cell-mapping-4vbpl\" (UID: \"a528dd72-29d9-43c4-8541-f6e416144724\") " pod="openstack/nova-cell1-cell-mapping-4vbpl" Nov 24 13:43:28 crc kubenswrapper[5039]: I1124 13:43:28.065713 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a528dd72-29d9-43c4-8541-f6e416144724-scripts\") pod \"nova-cell1-cell-mapping-4vbpl\" (UID: \"a528dd72-29d9-43c4-8541-f6e416144724\") " pod="openstack/nova-cell1-cell-mapping-4vbpl" Nov 24 13:43:28 crc kubenswrapper[5039]: I1124 13:43:28.065857 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a528dd72-29d9-43c4-8541-f6e416144724-config-data\") pod \"nova-cell1-cell-mapping-4vbpl\" (UID: \"a528dd72-29d9-43c4-8541-f6e416144724\") " pod="openstack/nova-cell1-cell-mapping-4vbpl" Nov 24 13:43:28 crc kubenswrapper[5039]: I1124 13:43:28.068040 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a528dd72-29d9-43c4-8541-f6e416144724-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-4vbpl\" (UID: \"a528dd72-29d9-43c4-8541-f6e416144724\") " pod="openstack/nova-cell1-cell-mapping-4vbpl" Nov 24 13:43:28 crc kubenswrapper[5039]: I1124 13:43:28.078483 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgr46\" (UniqueName: \"kubernetes.io/projected/a528dd72-29d9-43c4-8541-f6e416144724-kube-api-access-vgr46\") pod \"nova-cell1-cell-mapping-4vbpl\" (UID: \"a528dd72-29d9-43c4-8541-f6e416144724\") " pod="openstack/nova-cell1-cell-mapping-4vbpl" Nov 24 13:43:28 crc kubenswrapper[5039]: I1124 13:43:28.116793 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-4vbpl" Nov 24 13:43:28 crc kubenswrapper[5039]: I1124 13:43:28.330939 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b7e58ca-126f-4175-8e81-8311a1de04b4" path="/var/lib/kubelet/pods/2b7e58ca-126f-4175-8e81-8311a1de04b4/volumes" Nov 24 13:43:28 crc kubenswrapper[5039]: I1124 13:43:28.542790 5039 generic.go:334] "Generic (PLEG): container finished" podID="523ac154-3c03-4930-8729-874c0e056d14" containerID="04d55bc3cb1f9257bfa7fd3ade3173850a184e5c3f113e6f4b7cfbfdc062e01f" exitCode=0 Nov 24 13:43:28 crc kubenswrapper[5039]: I1124 13:43:28.542833 5039 generic.go:334] "Generic (PLEG): container finished" podID="523ac154-3c03-4930-8729-874c0e056d14" containerID="5976dd6cd4254023640ff24c7c1652c2245f6ef018a78fdf12ec71bfdb4263af" exitCode=2 Nov 24 13:43:28 crc kubenswrapper[5039]: I1124 13:43:28.542843 5039 generic.go:334] "Generic (PLEG): container finished" podID="523ac154-3c03-4930-8729-874c0e056d14" containerID="2153ded787a942c651b7fe4c4920cee10e596508ee3e9c64ea97a21e6ed4ba5e" exitCode=0 Nov 24 13:43:28 crc kubenswrapper[5039]: I1124 13:43:28.542918 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"523ac154-3c03-4930-8729-874c0e056d14","Type":"ContainerDied","Data":"04d55bc3cb1f9257bfa7fd3ade3173850a184e5c3f113e6f4b7cfbfdc062e01f"} Nov 24 13:43:28 crc kubenswrapper[5039]: I1124 13:43:28.542952 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"523ac154-3c03-4930-8729-874c0e056d14","Type":"ContainerDied","Data":"5976dd6cd4254023640ff24c7c1652c2245f6ef018a78fdf12ec71bfdb4263af"} Nov 24 13:43:28 crc kubenswrapper[5039]: I1124 13:43:28.542964 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"523ac154-3c03-4930-8729-874c0e056d14","Type":"ContainerDied","Data":"2153ded787a942c651b7fe4c4920cee10e596508ee3e9c64ea97a21e6ed4ba5e"} Nov 24 13:43:28 crc kubenswrapper[5039]: I1124 13:43:28.622177 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-4vbpl"] Nov 24 13:43:29 crc kubenswrapper[5039]: I1124 13:43:29.557576 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-4vbpl" event={"ID":"a528dd72-29d9-43c4-8541-f6e416144724","Type":"ContainerStarted","Data":"deda2b7d214afbbb2221e74c6adc18fdcbbdb6361daacd63b80cc5d1e2b2617b"} Nov 24 13:43:29 crc kubenswrapper[5039]: I1124 13:43:29.557833 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-4vbpl" event={"ID":"a528dd72-29d9-43c4-8541-f6e416144724","Type":"ContainerStarted","Data":"91ac62306f713a63ebee82e72b27ef3094e3488e13f13e69b8c613e770f68393"} Nov 24 13:43:29 crc kubenswrapper[5039]: I1124 13:43:29.577737 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-4vbpl" podStartSLOduration=2.577720937 podStartE2EDuration="2.577720937s" podCreationTimestamp="2025-11-24 13:43:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:43:29.570828918 +0000 UTC m=+1522.009953418" watchObservedRunningTime="2025-11-24 13:43:29.577720937 +0000 UTC m=+1522.016845437" Nov 24 13:43:30 crc kubenswrapper[5039]: I1124 13:43:30.585707 5039 generic.go:334] "Generic (PLEG): container finished" podID="523ac154-3c03-4930-8729-874c0e056d14" 
containerID="fdf1012c4f06085ededc4eb3766a497da190f9093412099b6e1481573650aa3b" exitCode=0 Nov 24 13:43:30 crc kubenswrapper[5039]: I1124 13:43:30.586148 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"523ac154-3c03-4930-8729-874c0e056d14","Type":"ContainerDied","Data":"fdf1012c4f06085ededc4eb3766a497da190f9093412099b6e1481573650aa3b"} Nov 24 13:43:30 crc kubenswrapper[5039]: I1124 13:43:30.902571 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.017532 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/523ac154-3c03-4930-8729-874c0e056d14-log-httpd\") pod \"523ac154-3c03-4930-8729-874c0e056d14\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.017685 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7rdhq\" (UniqueName: \"kubernetes.io/projected/523ac154-3c03-4930-8729-874c0e056d14-kube-api-access-7rdhq\") pod \"523ac154-3c03-4930-8729-874c0e056d14\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.017758 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-combined-ca-bundle\") pod \"523ac154-3c03-4930-8729-874c0e056d14\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.017909 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-scripts\") pod \"523ac154-3c03-4930-8729-874c0e056d14\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.018053 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-sg-core-conf-yaml\") pod \"523ac154-3c03-4930-8729-874c0e056d14\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.018084 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/523ac154-3c03-4930-8729-874c0e056d14-run-httpd\") pod \"523ac154-3c03-4930-8729-874c0e056d14\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.018233 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-ceilometer-tls-certs\") pod \"523ac154-3c03-4930-8729-874c0e056d14\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.018261 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-config-data\") pod \"523ac154-3c03-4930-8729-874c0e056d14\" (UID: \"523ac154-3c03-4930-8729-874c0e056d14\") " Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.018686 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/523ac154-3c03-4930-8729-874c0e056d14-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "523ac154-3c03-4930-8729-874c0e056d14" (UID: "523ac154-3c03-4930-8729-874c0e056d14"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.019061 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/523ac154-3c03-4930-8729-874c0e056d14-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "523ac154-3c03-4930-8729-874c0e056d14" (UID: "523ac154-3c03-4930-8729-874c0e056d14"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.024166 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-scripts" (OuterVolumeSpecName: "scripts") pod "523ac154-3c03-4930-8729-874c0e056d14" (UID: "523ac154-3c03-4930-8729-874c0e056d14"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.024434 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/523ac154-3c03-4930-8729-874c0e056d14-kube-api-access-7rdhq" (OuterVolumeSpecName: "kube-api-access-7rdhq") pod "523ac154-3c03-4930-8729-874c0e056d14" (UID: "523ac154-3c03-4930-8729-874c0e056d14"). InnerVolumeSpecName "kube-api-access-7rdhq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.047084 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "523ac154-3c03-4930-8729-874c0e056d14" (UID: "523ac154-3c03-4930-8729-874c0e056d14"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.080337 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "523ac154-3c03-4930-8729-874c0e056d14" (UID: "523ac154-3c03-4930-8729-874c0e056d14"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.121243 5039 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.121279 5039 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/523ac154-3c03-4930-8729-874c0e056d14-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.121291 5039 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.121328 5039 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/523ac154-3c03-4930-8729-874c0e056d14-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.121342 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7rdhq\" (UniqueName: \"kubernetes.io/projected/523ac154-3c03-4930-8729-874c0e056d14-kube-api-access-7rdhq\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.121356 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.122807 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "523ac154-3c03-4930-8729-874c0e056d14" (UID: "523ac154-3c03-4930-8729-874c0e056d14"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.151619 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-config-data" (OuterVolumeSpecName: "config-data") pod "523ac154-3c03-4930-8729-874c0e056d14" (UID: "523ac154-3c03-4930-8729-874c0e056d14"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.223639 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.223671 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/523ac154-3c03-4930-8729-874c0e056d14-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.599638 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"523ac154-3c03-4930-8729-874c0e056d14","Type":"ContainerDied","Data":"2ea30297207c57e95d688da7e755067f5bad57f51cedde36d24036deb7b19e92"} Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.599699 5039 scope.go:117] "RemoveContainer" containerID="04d55bc3cb1f9257bfa7fd3ade3173850a184e5c3f113e6f4b7cfbfdc062e01f" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.599734 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.632203 5039 scope.go:117] "RemoveContainer" containerID="5976dd6cd4254023640ff24c7c1652c2245f6ef018a78fdf12ec71bfdb4263af" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.663828 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.680594 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.693735 5039 scope.go:117] "RemoveContainer" containerID="2153ded787a942c651b7fe4c4920cee10e596508ee3e9c64ea97a21e6ed4ba5e" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.751226 5039 scope.go:117] "RemoveContainer" containerID="fdf1012c4f06085ededc4eb3766a497da190f9093412099b6e1481573650aa3b" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.783108 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:43:31 crc kubenswrapper[5039]: E1124 13:43:31.784225 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="523ac154-3c03-4930-8729-874c0e056d14" containerName="ceilometer-central-agent" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.784242 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="523ac154-3c03-4930-8729-874c0e056d14" containerName="ceilometer-central-agent" Nov 24 13:43:31 crc kubenswrapper[5039]: E1124 13:43:31.784264 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="523ac154-3c03-4930-8729-874c0e056d14" containerName="proxy-httpd" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.784271 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="523ac154-3c03-4930-8729-874c0e056d14" containerName="proxy-httpd" Nov 24 13:43:31 crc kubenswrapper[5039]: E1124 13:43:31.784294 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="523ac154-3c03-4930-8729-874c0e056d14" containerName="sg-core" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.784300 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="523ac154-3c03-4930-8729-874c0e056d14" containerName="sg-core" Nov 24 13:43:31 crc kubenswrapper[5039]: E1124 13:43:31.784311 5039 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="523ac154-3c03-4930-8729-874c0e056d14" containerName="ceilometer-notification-agent" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.784317 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="523ac154-3c03-4930-8729-874c0e056d14" containerName="ceilometer-notification-agent" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.790877 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="523ac154-3c03-4930-8729-874c0e056d14" containerName="sg-core" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.790919 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="523ac154-3c03-4930-8729-874c0e056d14" containerName="ceilometer-notification-agent" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.790932 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="523ac154-3c03-4930-8729-874c0e056d14" containerName="ceilometer-central-agent" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.790948 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="523ac154-3c03-4930-8729-874c0e056d14" containerName="proxy-httpd" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.799715 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.804093 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.809120 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.811357 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.832423 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.958612 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/817cea2c-2542-4f9c-8a3f-7ff06f359112-log-httpd\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.958674 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.958753 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-scripts\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.958823 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-config-data\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.958839 5039 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.958867 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/817cea2c-2542-4f9c-8a3f-7ff06f359112-run-httpd\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.958898 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w29zl\" (UniqueName: \"kubernetes.io/projected/817cea2c-2542-4f9c-8a3f-7ff06f359112-kube-api-access-w29zl\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:31 crc kubenswrapper[5039]: I1124 13:43:31.958937 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:32 crc kubenswrapper[5039]: I1124 13:43:32.061394 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:32 crc kubenswrapper[5039]: I1124 13:43:32.061476 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-scripts\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:32 crc kubenswrapper[5039]: I1124 13:43:32.061611 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-config-data\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:32 crc kubenswrapper[5039]: I1124 13:43:32.061639 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:32 crc kubenswrapper[5039]: I1124 13:43:32.061683 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/817cea2c-2542-4f9c-8a3f-7ff06f359112-run-httpd\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:32 crc kubenswrapper[5039]: I1124 13:43:32.061723 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w29zl\" (UniqueName: \"kubernetes.io/projected/817cea2c-2542-4f9c-8a3f-7ff06f359112-kube-api-access-w29zl\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " 
pod="openstack/ceilometer-0" Nov 24 13:43:32 crc kubenswrapper[5039]: I1124 13:43:32.061786 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:32 crc kubenswrapper[5039]: I1124 13:43:32.061837 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/817cea2c-2542-4f9c-8a3f-7ff06f359112-log-httpd\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:32 crc kubenswrapper[5039]: I1124 13:43:32.062196 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/817cea2c-2542-4f9c-8a3f-7ff06f359112-run-httpd\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:32 crc kubenswrapper[5039]: I1124 13:43:32.062234 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/817cea2c-2542-4f9c-8a3f-7ff06f359112-log-httpd\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:32 crc kubenswrapper[5039]: I1124 13:43:32.065651 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:32 crc kubenswrapper[5039]: I1124 13:43:32.066203 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:32 crc kubenswrapper[5039]: I1124 13:43:32.067448 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:32 crc kubenswrapper[5039]: I1124 13:43:32.067454 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-config-data\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:32 crc kubenswrapper[5039]: I1124 13:43:32.069639 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-scripts\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:32 crc kubenswrapper[5039]: I1124 13:43:32.081563 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w29zl\" (UniqueName: \"kubernetes.io/projected/817cea2c-2542-4f9c-8a3f-7ff06f359112-kube-api-access-w29zl\") pod \"ceilometer-0\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " pod="openstack/ceilometer-0" Nov 24 13:43:32 crc kubenswrapper[5039]: 
I1124 13:43:32.135120 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:43:32 crc kubenswrapper[5039]: I1124 13:43:32.323565 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="523ac154-3c03-4930-8729-874c0e056d14" path="/var/lib/kubelet/pods/523ac154-3c03-4930-8729-874c0e056d14/volumes" Nov 24 13:43:32 crc kubenswrapper[5039]: I1124 13:43:32.635232 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:43:32 crc kubenswrapper[5039]: W1124 13:43:32.958879 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e57550d_0004_4f59_882e_557913349848.slice/crio-6cab88ab1dd4d6b5b29438fcfa20b3f8bc01e77ed5bce0f58d8e1e900e136d70.scope WatchSource:0}: Error finding container 6cab88ab1dd4d6b5b29438fcfa20b3f8bc01e77ed5bce0f58d8e1e900e136d70: Status 404 returned error can't find the container with id 6cab88ab1dd4d6b5b29438fcfa20b3f8bc01e77ed5bce0f58d8e1e900e136d70 Nov 24 13:43:32 crc kubenswrapper[5039]: W1124 13:43:32.972034 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e57550d_0004_4f59_882e_557913349848.slice/crio-f9d3f3b2392c27e3e34fb8adadf1e5bf2ac1c29f088fcee9fc688fba80c682da.scope WatchSource:0}: Error finding container f9d3f3b2392c27e3e34fb8adadf1e5bf2ac1c29f088fcee9fc688fba80c682da: Status 404 returned error can't find the container with id f9d3f3b2392c27e3e34fb8adadf1e5bf2ac1c29f088fcee9fc688fba80c682da Nov 24 13:43:32 crc kubenswrapper[5039]: W1124 13:43:32.976726 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e57550d_0004_4f59_882e_557913349848.slice/crio-14a23fd2b0668280c9cff5bff206b8f2064163145c8d6c72da534b54843c8513.scope WatchSource:0}: Error finding container 14a23fd2b0668280c9cff5bff206b8f2064163145c8d6c72da534b54843c8513: Status 404 returned error can't find the container with id 14a23fd2b0668280c9cff5bff206b8f2064163145c8d6c72da534b54843c8513 Nov 24 13:43:32 crc kubenswrapper[5039]: W1124 13:43:32.977231 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e57550d_0004_4f59_882e_557913349848.slice/crio-c8e6ca3373445b885495c93b8a540a08075131dd92cd669c4eafd8d93e3f329a.scope WatchSource:0}: Error finding container c8e6ca3373445b885495c93b8a540a08075131dd92cd669c4eafd8d93e3f329a: Status 404 returned error can't find the container with id c8e6ca3373445b885495c93b8a540a08075131dd92cd669c4eafd8d93e3f329a Nov 24 13:43:32 crc kubenswrapper[5039]: W1124 13:43:32.987926 5039 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod523ac154_3c03_4930_8729_874c0e056d14.slice": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod523ac154_3c03_4930_8729_874c0e056d14.slice: no such file or directory Nov 24 13:43:33 crc kubenswrapper[5039]: E1124 13:43:33.205619 5039 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e57550d_0004_4f59_882e_557913349848.slice/crio-c29cf48c0649be699f3564de352af89816933499016e10bc574d5cc98d711de2\": RecentStats: unable to find data in memory 
cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5f10fd8_c9e2_4da0_ba9d_e9bde2e75443.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b7e58ca_126f_4175_8e81_8311a1de04b4.slice/crio-conmon-5f8fdd7de7be0c4c0d3edbfc3db5dfdac02d2a635b401e8ecfe235dd8f121017.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b7e58ca_126f_4175_8e81_8311a1de04b4.slice/crio-f9abc6cd4e5fe2cdb72b5744c49e753a52629d9d0366cf8caecccb5cae55d997\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e57550d_0004_4f59_882e_557913349848.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e57550d_0004_4f59_882e_557913349848.slice/crio-conmon-f9d3f3b2392c27e3e34fb8adadf1e5bf2ac1c29f088fcee9fc688fba80c682da.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5f10fd8_c9e2_4da0_ba9d_e9bde2e75443.slice/crio-224cfb9288ec9a52c6d1ef4b70b3fe444c88318831c75bd87a70ff638dca7a54.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5f10fd8_c9e2_4da0_ba9d_e9bde2e75443.slice/crio-f26ee9d9a1b9d2100d6527528a4c5e6e0e28cd8b94e88143ad37749aa4ad9758\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5f10fd8_c9e2_4da0_ba9d_e9bde2e75443.slice/crio-d3fb66e806b0ddadf9363c04962e0af179907d8f84096395d6cd1b010e804771.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce86b4cd_2cb0_4cec_8b42_22a855734a60.slice/crio-d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b7e58ca_126f_4175_8e81_8311a1de04b4.slice/crio-5f8fdd7de7be0c4c0d3edbfc3db5dfdac02d2a635b401e8ecfe235dd8f121017.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce86b4cd_2cb0_4cec_8b42_22a855734a60.slice/crio-conmon-d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5f10fd8_c9e2_4da0_ba9d_e9bde2e75443.slice/crio-conmon-d3fb66e806b0ddadf9363c04962e0af179907d8f84096395d6cd1b010e804771.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e57550d_0004_4f59_882e_557913349848.slice/crio-conmon-c8e6ca3373445b885495c93b8a540a08075131dd92cd669c4eafd8d93e3f329a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5f10fd8_c9e2_4da0_ba9d_e9bde2e75443.slice/crio-conmon-224cfb9288ec9a52c6d1ef4b70b3fe444c88318831c75bd87a70ff638dca7a54.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b7e58ca_126f_4175_8e81_8311a1de04b4.slice\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0168214d_ac93_41c7_babc_e048a74fca46.slice/crio-40d5dd8ff768e378422a7e4617d363fa85e7c9000cf7dab745e477474cccec47.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e57550d_0004_4f59_882e_557913349848.slice/crio-conmon-6cab88ab1dd4d6b5b29438fcfa20b3f8bc01e77ed5bce0f58d8e1e900e136d70.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a180464_260b_456c_bb96_c83c69cd2258.slice/crio-92bf6ee72ff68a7a7298ff2aef698fb4a54e003083f0b9360c1eb721ffab2693\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e57550d_0004_4f59_882e_557913349848.slice/crio-conmon-14a23fd2b0668280c9cff5bff206b8f2064163145c8d6c72da534b54843c8513.scope\": RecentStats: unable to find data in memory cache]" Nov 24 13:43:33 crc kubenswrapper[5039]: I1124 13:43:33.635266 5039 generic.go:334] "Generic (PLEG): container finished" podID="0168214d-ac93-41c7-babc-e048a74fca46" containerID="40d5dd8ff768e378422a7e4617d363fa85e7c9000cf7dab745e477474cccec47" exitCode=137 Nov 24 13:43:33 crc kubenswrapper[5039]: I1124 13:43:33.635677 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0168214d-ac93-41c7-babc-e048a74fca46","Type":"ContainerDied","Data":"40d5dd8ff768e378422a7e4617d363fa85e7c9000cf7dab745e477474cccec47"} Nov 24 13:43:33 crc kubenswrapper[5039]: I1124 13:43:33.637382 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"817cea2c-2542-4f9c-8a3f-7ff06f359112","Type":"ContainerStarted","Data":"5b1d821ad540544a5e9ae893157806608c71bb04c9e5e6b334a7dd2ed168de4d"} Nov 24 13:43:33 crc kubenswrapper[5039]: I1124 13:43:33.637410 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"817cea2c-2542-4f9c-8a3f-7ff06f359112","Type":"ContainerStarted","Data":"0edaff7fe8006e5bbec10c359f2655471597205172bad9d44820bb2045563586"} Nov 24 13:43:33 crc kubenswrapper[5039]: I1124 13:43:33.887409 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 24 13:43:33 crc kubenswrapper[5039]: I1124 13:43:33.887858 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 24 13:43:33 crc kubenswrapper[5039]: I1124 13:43:33.975822 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.109047 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0168214d-ac93-41c7-babc-e048a74fca46-scripts\") pod \"0168214d-ac93-41c7-babc-e048a74fca46\" (UID: \"0168214d-ac93-41c7-babc-e048a74fca46\") " Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.109202 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8mrt6\" (UniqueName: \"kubernetes.io/projected/0168214d-ac93-41c7-babc-e048a74fca46-kube-api-access-8mrt6\") pod \"0168214d-ac93-41c7-babc-e048a74fca46\" (UID: \"0168214d-ac93-41c7-babc-e048a74fca46\") " Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.109273 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0168214d-ac93-41c7-babc-e048a74fca46-combined-ca-bundle\") pod \"0168214d-ac93-41c7-babc-e048a74fca46\" (UID: \"0168214d-ac93-41c7-babc-e048a74fca46\") " Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.109333 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0168214d-ac93-41c7-babc-e048a74fca46-config-data\") pod \"0168214d-ac93-41c7-babc-e048a74fca46\" (UID: \"0168214d-ac93-41c7-babc-e048a74fca46\") " Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.116688 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0168214d-ac93-41c7-babc-e048a74fca46-scripts" (OuterVolumeSpecName: "scripts") pod "0168214d-ac93-41c7-babc-e048a74fca46" (UID: "0168214d-ac93-41c7-babc-e048a74fca46"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.119426 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0168214d-ac93-41c7-babc-e048a74fca46-kube-api-access-8mrt6" (OuterVolumeSpecName: "kube-api-access-8mrt6") pod "0168214d-ac93-41c7-babc-e048a74fca46" (UID: "0168214d-ac93-41c7-babc-e048a74fca46"). InnerVolumeSpecName "kube-api-access-8mrt6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.212884 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8mrt6\" (UniqueName: \"kubernetes.io/projected/0168214d-ac93-41c7-babc-e048a74fca46-kube-api-access-8mrt6\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.212921 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0168214d-ac93-41c7-babc-e048a74fca46-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.254670 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0168214d-ac93-41c7-babc-e048a74fca46-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0168214d-ac93-41c7-babc-e048a74fca46" (UID: "0168214d-ac93-41c7-babc-e048a74fca46"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.262775 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0168214d-ac93-41c7-babc-e048a74fca46-config-data" (OuterVolumeSpecName: "config-data") pod "0168214d-ac93-41c7-babc-e048a74fca46" (UID: "0168214d-ac93-41c7-babc-e048a74fca46"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.314956 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0168214d-ac93-41c7-babc-e048a74fca46-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.315203 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0168214d-ac93-41c7-babc-e048a74fca46-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.652854 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"817cea2c-2542-4f9c-8a3f-7ff06f359112","Type":"ContainerStarted","Data":"6bf2f764b1d8cdacc5168f586602beae4dec8e5b2471541ab8fb1c13bc5a9e5a"} Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.655164 5039 generic.go:334] "Generic (PLEG): container finished" podID="a528dd72-29d9-43c4-8541-f6e416144724" containerID="deda2b7d214afbbb2221e74c6adc18fdcbbdb6361daacd63b80cc5d1e2b2617b" exitCode=0 Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.655252 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-4vbpl" event={"ID":"a528dd72-29d9-43c4-8541-f6e416144724","Type":"ContainerDied","Data":"deda2b7d214afbbb2221e74c6adc18fdcbbdb6361daacd63b80cc5d1e2b2617b"} Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.659110 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0168214d-ac93-41c7-babc-e048a74fca46","Type":"ContainerDied","Data":"326e269a5fc812f75e0b480e96e93e3705be740ce6e3c2e02a0726131e4b460c"} Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.659170 5039 scope.go:117] "RemoveContainer" containerID="40d5dd8ff768e378422a7e4617d363fa85e7c9000cf7dab745e477474cccec47" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.659338 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.693380 5039 scope.go:117] "RemoveContainer" containerID="eebe295f1f6a3e8cad4905c2789044b8df990702f9bd0b8b5e85507c6c52e4ed" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.743312 5039 scope.go:117] "RemoveContainer" containerID="9c80abddb304f82db0447974336a724f454f32d5e1537735e5bf4d039cd2ec4c" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.757845 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.774823 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.789802 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 24 13:43:34 crc kubenswrapper[5039]: E1124 13:43:34.790654 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0168214d-ac93-41c7-babc-e048a74fca46" containerName="aodh-notifier" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.790707 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0168214d-ac93-41c7-babc-e048a74fca46" containerName="aodh-notifier" Nov 24 13:43:34 crc kubenswrapper[5039]: E1124 13:43:34.790799 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0168214d-ac93-41c7-babc-e048a74fca46" containerName="aodh-api" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.790810 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0168214d-ac93-41c7-babc-e048a74fca46" containerName="aodh-api" Nov 24 13:43:34 crc kubenswrapper[5039]: E1124 13:43:34.790826 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0168214d-ac93-41c7-babc-e048a74fca46" containerName="aodh-evaluator" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.790834 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0168214d-ac93-41c7-babc-e048a74fca46" containerName="aodh-evaluator" Nov 24 13:43:34 crc kubenswrapper[5039]: E1124 13:43:34.790846 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0168214d-ac93-41c7-babc-e048a74fca46" containerName="aodh-listener" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.790881 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0168214d-ac93-41c7-babc-e048a74fca46" containerName="aodh-listener" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.791347 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0168214d-ac93-41c7-babc-e048a74fca46" containerName="aodh-notifier" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.791373 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0168214d-ac93-41c7-babc-e048a74fca46" containerName="aodh-api" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.791400 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0168214d-ac93-41c7-babc-e048a74fca46" containerName="aodh-listener" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.791415 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0168214d-ac93-41c7-babc-e048a74fca46" containerName="aodh-evaluator" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.796029 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.807832 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.808121 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.809404 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.811025 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.821246 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-k6pb7" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.821626 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.880469 5039 scope.go:117] "RemoveContainer" containerID="beb8453fafc170298ec053d21b44afd227725a0f96e46cb229b938bc0a550741" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.902730 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="770f9659-47d8-4af2-bc52-18fa13b7d10e" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.244:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.902731 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="770f9659-47d8-4af2-bc52-18fa13b7d10e" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.244:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.946909 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-config-data\") pod \"aodh-0\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " pod="openstack/aodh-0" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.946985 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-scripts\") pod \"aodh-0\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " pod="openstack/aodh-0" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.947004 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-combined-ca-bundle\") pod \"aodh-0\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " pod="openstack/aodh-0" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.947034 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-internal-tls-certs\") pod \"aodh-0\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " pod="openstack/aodh-0" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.947049 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-public-tls-certs\") pod \"aodh-0\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " pod="openstack/aodh-0" Nov 24 13:43:34 crc kubenswrapper[5039]: I1124 13:43:34.947065 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86c8n\" (UniqueName: \"kubernetes.io/projected/cc51777e-d169-47d1-bfe2-006a99d0ba7c-kube-api-access-86c8n\") pod \"aodh-0\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " pod="openstack/aodh-0" Nov 24 13:43:35 crc kubenswrapper[5039]: I1124 13:43:35.048293 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-public-tls-certs\") pod \"aodh-0\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " pod="openstack/aodh-0" Nov 24 13:43:35 crc kubenswrapper[5039]: I1124 13:43:35.048364 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86c8n\" (UniqueName: \"kubernetes.io/projected/cc51777e-d169-47d1-bfe2-006a99d0ba7c-kube-api-access-86c8n\") pod \"aodh-0\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " pod="openstack/aodh-0" Nov 24 13:43:35 crc kubenswrapper[5039]: I1124 13:43:35.048541 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-config-data\") pod \"aodh-0\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " pod="openstack/aodh-0" Nov 24 13:43:35 crc kubenswrapper[5039]: I1124 13:43:35.049182 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-scripts\") pod \"aodh-0\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " pod="openstack/aodh-0" Nov 24 13:43:35 crc kubenswrapper[5039]: I1124 13:43:35.049208 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-combined-ca-bundle\") pod \"aodh-0\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " pod="openstack/aodh-0" Nov 24 13:43:35 crc kubenswrapper[5039]: I1124 13:43:35.049240 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-internal-tls-certs\") pod \"aodh-0\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " pod="openstack/aodh-0" Nov 24 13:43:35 crc kubenswrapper[5039]: I1124 13:43:35.053881 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-public-tls-certs\") pod \"aodh-0\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " pod="openstack/aodh-0" Nov 24 13:43:35 crc kubenswrapper[5039]: I1124 13:43:35.056414 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-config-data\") pod \"aodh-0\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " pod="openstack/aodh-0" Nov 24 13:43:35 crc kubenswrapper[5039]: I1124 13:43:35.056949 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-combined-ca-bundle\") pod 
\"aodh-0\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " pod="openstack/aodh-0" Nov 24 13:43:35 crc kubenswrapper[5039]: I1124 13:43:35.056962 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-scripts\") pod \"aodh-0\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " pod="openstack/aodh-0" Nov 24 13:43:35 crc kubenswrapper[5039]: I1124 13:43:35.057359 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-internal-tls-certs\") pod \"aodh-0\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " pod="openstack/aodh-0" Nov 24 13:43:35 crc kubenswrapper[5039]: I1124 13:43:35.078137 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86c8n\" (UniqueName: \"kubernetes.io/projected/cc51777e-d169-47d1-bfe2-006a99d0ba7c-kube-api-access-86c8n\") pod \"aodh-0\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " pod="openstack/aodh-0" Nov 24 13:43:35 crc kubenswrapper[5039]: I1124 13:43:35.178549 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 24 13:43:35 crc kubenswrapper[5039]: I1124 13:43:35.308932 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:43:35 crc kubenswrapper[5039]: E1124 13:43:35.309381 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:43:35 crc kubenswrapper[5039]: I1124 13:43:35.677149 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"817cea2c-2542-4f9c-8a3f-7ff06f359112","Type":"ContainerStarted","Data":"91e400ca05181715fa556a024bf2780ac2d4d9d63f6d11873a8a7800988ed86c"} Nov 24 13:43:35 crc kubenswrapper[5039]: I1124 13:43:35.800303 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 24 13:43:35 crc kubenswrapper[5039]: I1124 13:43:35.806255 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.205017 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-4vbpl" Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.328969 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0168214d-ac93-41c7-babc-e048a74fca46" path="/var/lib/kubelet/pods/0168214d-ac93-41c7-babc-e048a74fca46/volumes" Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.383375 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a528dd72-29d9-43c4-8541-f6e416144724-config-data\") pod \"a528dd72-29d9-43c4-8541-f6e416144724\" (UID: \"a528dd72-29d9-43c4-8541-f6e416144724\") " Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.383688 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a528dd72-29d9-43c4-8541-f6e416144724-combined-ca-bundle\") pod \"a528dd72-29d9-43c4-8541-f6e416144724\" (UID: \"a528dd72-29d9-43c4-8541-f6e416144724\") " Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.383851 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a528dd72-29d9-43c4-8541-f6e416144724-scripts\") pod \"a528dd72-29d9-43c4-8541-f6e416144724\" (UID: \"a528dd72-29d9-43c4-8541-f6e416144724\") " Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.384309 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vgr46\" (UniqueName: \"kubernetes.io/projected/a528dd72-29d9-43c4-8541-f6e416144724-kube-api-access-vgr46\") pod \"a528dd72-29d9-43c4-8541-f6e416144724\" (UID: \"a528dd72-29d9-43c4-8541-f6e416144724\") " Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.389664 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a528dd72-29d9-43c4-8541-f6e416144724-kube-api-access-vgr46" (OuterVolumeSpecName: "kube-api-access-vgr46") pod "a528dd72-29d9-43c4-8541-f6e416144724" (UID: "a528dd72-29d9-43c4-8541-f6e416144724"). InnerVolumeSpecName "kube-api-access-vgr46". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.390548 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a528dd72-29d9-43c4-8541-f6e416144724-scripts" (OuterVolumeSpecName: "scripts") pod "a528dd72-29d9-43c4-8541-f6e416144724" (UID: "a528dd72-29d9-43c4-8541-f6e416144724"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.426608 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a528dd72-29d9-43c4-8541-f6e416144724-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a528dd72-29d9-43c4-8541-f6e416144724" (UID: "a528dd72-29d9-43c4-8541-f6e416144724"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.428753 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a528dd72-29d9-43c4-8541-f6e416144724-config-data" (OuterVolumeSpecName: "config-data") pod "a528dd72-29d9-43c4-8541-f6e416144724" (UID: "a528dd72-29d9-43c4-8541-f6e416144724"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.486889 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vgr46\" (UniqueName: \"kubernetes.io/projected/a528dd72-29d9-43c4-8541-f6e416144724-kube-api-access-vgr46\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.486919 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a528dd72-29d9-43c4-8541-f6e416144724-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.486929 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a528dd72-29d9-43c4-8541-f6e416144724-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.486938 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a528dd72-29d9-43c4-8541-f6e416144724-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.700134 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-4vbpl" event={"ID":"a528dd72-29d9-43c4-8541-f6e416144724","Type":"ContainerDied","Data":"91ac62306f713a63ebee82e72b27ef3094e3488e13f13e69b8c613e770f68393"} Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.700545 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="91ac62306f713a63ebee82e72b27ef3094e3488e13f13e69b8c613e770f68393" Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.700150 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-4vbpl" Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.714488 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"cc51777e-d169-47d1-bfe2-006a99d0ba7c","Type":"ContainerStarted","Data":"6cf190ed03996bcb9e0099229df6d350780f4a0e38269f6973ad8e4dd54c92b7"} Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.716895 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"817cea2c-2542-4f9c-8a3f-7ff06f359112","Type":"ContainerStarted","Data":"3d4b495984ba1d5382ba65e0df5ecb31574a09eed59fa445267d24869f8f2b5c"} Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.717041 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.747259 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.394184831 podStartE2EDuration="5.747233574s" podCreationTimestamp="2025-11-24 13:43:31 +0000 UTC" firstStartedPulling="2025-11-24 13:43:32.63757514 +0000 UTC m=+1525.076699640" lastFinishedPulling="2025-11-24 13:43:35.990623883 +0000 UTC m=+1528.429748383" observedRunningTime="2025-11-24 13:43:36.740168781 +0000 UTC m=+1529.179293291" watchObservedRunningTime="2025-11-24 13:43:36.747233574 +0000 UTC m=+1529.186358074" Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.872544 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.872846 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" 
podUID="770f9659-47d8-4af2-bc52-18fa13b7d10e" containerName="nova-api-log" containerID="cri-o://9668d8a8c6a8cc19fecb760b3a85686c868a613f96807f5b685bcc2aefe29046" gracePeriod=30 Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.873014 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="770f9659-47d8-4af2-bc52-18fa13b7d10e" containerName="nova-api-api" containerID="cri-o://e31fa4c95041a53a027f913246669668ea738a07cd4fb3a7b164e47c0b054f2d" gracePeriod=30 Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.891022 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.891266 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="366e8969-e52e-4bdf-8171-18d9dedff03c" containerName="nova-scheduler-scheduler" containerID="cri-o://7841fe8d5a386463852ff5a0699752832a5d497deff78e1603e25ba99a718fbc" gracePeriod=30 Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.909083 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.909367 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b" containerName="nova-metadata-log" containerID="cri-o://5d79751868aa423bc2ee3b734b640e180feacd89ffec45885adaabc31ee4bee7" gracePeriod=30 Nov 24 13:43:36 crc kubenswrapper[5039]: I1124 13:43:36.909545 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b" containerName="nova-metadata-metadata" containerID="cri-o://62bf54e687cfd6eea3f1d770802c144f1876aee8edafc03614fd2e61aa042c70" gracePeriod=30 Nov 24 13:43:37 crc kubenswrapper[5039]: I1124 13:43:37.729051 5039 generic.go:334] "Generic (PLEG): container finished" podID="254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b" containerID="5d79751868aa423bc2ee3b734b640e180feacd89ffec45885adaabc31ee4bee7" exitCode=143 Nov 24 13:43:37 crc kubenswrapper[5039]: I1124 13:43:37.729140 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b","Type":"ContainerDied","Data":"5d79751868aa423bc2ee3b734b640e180feacd89ffec45885adaabc31ee4bee7"} Nov 24 13:43:37 crc kubenswrapper[5039]: I1124 13:43:37.732747 5039 generic.go:334] "Generic (PLEG): container finished" podID="770f9659-47d8-4af2-bc52-18fa13b7d10e" containerID="9668d8a8c6a8cc19fecb760b3a85686c868a613f96807f5b685bcc2aefe29046" exitCode=143 Nov 24 13:43:37 crc kubenswrapper[5039]: I1124 13:43:37.732787 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"770f9659-47d8-4af2-bc52-18fa13b7d10e","Type":"ContainerDied","Data":"9668d8a8c6a8cc19fecb760b3a85686c868a613f96807f5b685bcc2aefe29046"} Nov 24 13:43:37 crc kubenswrapper[5039]: I1124 13:43:37.735733 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"cc51777e-d169-47d1-bfe2-006a99d0ba7c","Type":"ContainerStarted","Data":"5dd3da925ba59309f32c6a17ab10a6d7316c69f7ad5c9695a71c88cff1a1a8f4"} Nov 24 13:43:37 crc kubenswrapper[5039]: I1124 13:43:37.735768 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" 
event={"ID":"cc51777e-d169-47d1-bfe2-006a99d0ba7c","Type":"ContainerStarted","Data":"c63d3b34d01c54e937720dc6e9a1e56385de8b6e7bf96ecf78a68d2aa6b1aa44"} Nov 24 13:43:38 crc kubenswrapper[5039]: I1124 13:43:38.755842 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"cc51777e-d169-47d1-bfe2-006a99d0ba7c","Type":"ContainerStarted","Data":"51f9030c77524c934dd16d9fa87ee58b0f150e4b4496fd77da3e7952a12f856e"} Nov 24 13:43:38 crc kubenswrapper[5039]: I1124 13:43:38.767682 5039 generic.go:334] "Generic (PLEG): container finished" podID="366e8969-e52e-4bdf-8171-18d9dedff03c" containerID="7841fe8d5a386463852ff5a0699752832a5d497deff78e1603e25ba99a718fbc" exitCode=0 Nov 24 13:43:38 crc kubenswrapper[5039]: I1124 13:43:38.767721 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"366e8969-e52e-4bdf-8171-18d9dedff03c","Type":"ContainerDied","Data":"7841fe8d5a386463852ff5a0699752832a5d497deff78e1603e25ba99a718fbc"} Nov 24 13:43:39 crc kubenswrapper[5039]: E1124 13:43:39.141112 5039 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7841fe8d5a386463852ff5a0699752832a5d497deff78e1603e25ba99a718fbc is running failed: container process not found" containerID="7841fe8d5a386463852ff5a0699752832a5d497deff78e1603e25ba99a718fbc" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 24 13:43:39 crc kubenswrapper[5039]: E1124 13:43:39.142140 5039 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7841fe8d5a386463852ff5a0699752832a5d497deff78e1603e25ba99a718fbc is running failed: container process not found" containerID="7841fe8d5a386463852ff5a0699752832a5d497deff78e1603e25ba99a718fbc" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 24 13:43:39 crc kubenswrapper[5039]: E1124 13:43:39.142573 5039 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7841fe8d5a386463852ff5a0699752832a5d497deff78e1603e25ba99a718fbc is running failed: container process not found" containerID="7841fe8d5a386463852ff5a0699752832a5d497deff78e1603e25ba99a718fbc" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 24 13:43:39 crc kubenswrapper[5039]: E1124 13:43:39.142615 5039 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7841fe8d5a386463852ff5a0699752832a5d497deff78e1603e25ba99a718fbc is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="366e8969-e52e-4bdf-8171-18d9dedff03c" containerName="nova-scheduler-scheduler" Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.221365 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.366654 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zv92t\" (UniqueName: \"kubernetes.io/projected/366e8969-e52e-4bdf-8171-18d9dedff03c-kube-api-access-zv92t\") pod \"366e8969-e52e-4bdf-8171-18d9dedff03c\" (UID: \"366e8969-e52e-4bdf-8171-18d9dedff03c\") " Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.366757 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/366e8969-e52e-4bdf-8171-18d9dedff03c-config-data\") pod \"366e8969-e52e-4bdf-8171-18d9dedff03c\" (UID: \"366e8969-e52e-4bdf-8171-18d9dedff03c\") " Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.366832 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/366e8969-e52e-4bdf-8171-18d9dedff03c-combined-ca-bundle\") pod \"366e8969-e52e-4bdf-8171-18d9dedff03c\" (UID: \"366e8969-e52e-4bdf-8171-18d9dedff03c\") " Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.376726 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/366e8969-e52e-4bdf-8171-18d9dedff03c-kube-api-access-zv92t" (OuterVolumeSpecName: "kube-api-access-zv92t") pod "366e8969-e52e-4bdf-8171-18d9dedff03c" (UID: "366e8969-e52e-4bdf-8171-18d9dedff03c"). InnerVolumeSpecName "kube-api-access-zv92t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.416874 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/366e8969-e52e-4bdf-8171-18d9dedff03c-config-data" (OuterVolumeSpecName: "config-data") pod "366e8969-e52e-4bdf-8171-18d9dedff03c" (UID: "366e8969-e52e-4bdf-8171-18d9dedff03c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.421010 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/366e8969-e52e-4bdf-8171-18d9dedff03c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "366e8969-e52e-4bdf-8171-18d9dedff03c" (UID: "366e8969-e52e-4bdf-8171-18d9dedff03c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.468926 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zv92t\" (UniqueName: \"kubernetes.io/projected/366e8969-e52e-4bdf-8171-18d9dedff03c-kube-api-access-zv92t\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.468953 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/366e8969-e52e-4bdf-8171-18d9dedff03c-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.468962 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/366e8969-e52e-4bdf-8171-18d9dedff03c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.782737 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"cc51777e-d169-47d1-bfe2-006a99d0ba7c","Type":"ContainerStarted","Data":"fa3376a8b799f58ed41cfeec6ce65c0643a72b63585966ef7aca2df99a3a8532"} Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.785825 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"366e8969-e52e-4bdf-8171-18d9dedff03c","Type":"ContainerDied","Data":"f8744ca9780fd4ed4b303020159546d487c69bfc639f356160fe49b304c37eaf"} Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.785887 5039 scope.go:117] "RemoveContainer" containerID="7841fe8d5a386463852ff5a0699752832a5d497deff78e1603e25ba99a718fbc" Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.785991 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.817855 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.855730617 podStartE2EDuration="5.817828999s" podCreationTimestamp="2025-11-24 13:43:34 +0000 UTC" firstStartedPulling="2025-11-24 13:43:35.805946067 +0000 UTC m=+1528.245070567" lastFinishedPulling="2025-11-24 13:43:38.768044449 +0000 UTC m=+1531.207168949" observedRunningTime="2025-11-24 13:43:39.804218886 +0000 UTC m=+1532.243343406" watchObservedRunningTime="2025-11-24 13:43:39.817828999 +0000 UTC m=+1532.256953499" Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.898213 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.913338 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.926347 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 13:43:39 crc kubenswrapper[5039]: E1124 13:43:39.933956 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="366e8969-e52e-4bdf-8171-18d9dedff03c" containerName="nova-scheduler-scheduler" Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.934002 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="366e8969-e52e-4bdf-8171-18d9dedff03c" containerName="nova-scheduler-scheduler" Nov 24 13:43:39 crc kubenswrapper[5039]: E1124 13:43:39.934018 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a528dd72-29d9-43c4-8541-f6e416144724" containerName="nova-manage" Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.934026 
5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="a528dd72-29d9-43c4-8541-f6e416144724" containerName="nova-manage" Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.934264 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="366e8969-e52e-4bdf-8171-18d9dedff03c" containerName="nova-scheduler-scheduler" Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.934290 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="a528dd72-29d9-43c4-8541-f6e416144724" containerName="nova-manage" Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.935328 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.941558 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 24 13:43:39 crc kubenswrapper[5039]: I1124 13:43:39.942916 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 13:43:40 crc kubenswrapper[5039]: I1124 13:43:40.091364 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rrnq\" (UniqueName: \"kubernetes.io/projected/04160090-3eab-412c-a6e0-6946a44bcb81-kube-api-access-7rrnq\") pod \"nova-scheduler-0\" (UID: \"04160090-3eab-412c-a6e0-6946a44bcb81\") " pod="openstack/nova-scheduler-0" Nov 24 13:43:40 crc kubenswrapper[5039]: I1124 13:43:40.091485 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04160090-3eab-412c-a6e0-6946a44bcb81-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"04160090-3eab-412c-a6e0-6946a44bcb81\") " pod="openstack/nova-scheduler-0" Nov 24 13:43:40 crc kubenswrapper[5039]: I1124 13:43:40.091627 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04160090-3eab-412c-a6e0-6946a44bcb81-config-data\") pod \"nova-scheduler-0\" (UID: \"04160090-3eab-412c-a6e0-6946a44bcb81\") " pod="openstack/nova-scheduler-0" Nov 24 13:43:40 crc kubenswrapper[5039]: I1124 13:43:40.193973 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rrnq\" (UniqueName: \"kubernetes.io/projected/04160090-3eab-412c-a6e0-6946a44bcb81-kube-api-access-7rrnq\") pod \"nova-scheduler-0\" (UID: \"04160090-3eab-412c-a6e0-6946a44bcb81\") " pod="openstack/nova-scheduler-0" Nov 24 13:43:40 crc kubenswrapper[5039]: I1124 13:43:40.194114 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04160090-3eab-412c-a6e0-6946a44bcb81-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"04160090-3eab-412c-a6e0-6946a44bcb81\") " pod="openstack/nova-scheduler-0" Nov 24 13:43:40 crc kubenswrapper[5039]: I1124 13:43:40.194207 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04160090-3eab-412c-a6e0-6946a44bcb81-config-data\") pod \"nova-scheduler-0\" (UID: \"04160090-3eab-412c-a6e0-6946a44bcb81\") " pod="openstack/nova-scheduler-0" Nov 24 13:43:40 crc kubenswrapper[5039]: I1124 13:43:40.199388 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04160090-3eab-412c-a6e0-6946a44bcb81-combined-ca-bundle\") 
pod \"nova-scheduler-0\" (UID: \"04160090-3eab-412c-a6e0-6946a44bcb81\") " pod="openstack/nova-scheduler-0" Nov 24 13:43:40 crc kubenswrapper[5039]: I1124 13:43:40.199731 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04160090-3eab-412c-a6e0-6946a44bcb81-config-data\") pod \"nova-scheduler-0\" (UID: \"04160090-3eab-412c-a6e0-6946a44bcb81\") " pod="openstack/nova-scheduler-0" Nov 24 13:43:40 crc kubenswrapper[5039]: I1124 13:43:40.224162 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rrnq\" (UniqueName: \"kubernetes.io/projected/04160090-3eab-412c-a6e0-6946a44bcb81-kube-api-access-7rrnq\") pod \"nova-scheduler-0\" (UID: \"04160090-3eab-412c-a6e0-6946a44bcb81\") " pod="openstack/nova-scheduler-0" Nov 24 13:43:40 crc kubenswrapper[5039]: I1124 13:43:40.268406 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 13:43:40 crc kubenswrapper[5039]: I1124 13:43:40.328288 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="366e8969-e52e-4bdf-8171-18d9dedff03c" path="/var/lib/kubelet/pods/366e8969-e52e-4bdf-8171-18d9dedff03c/volumes" Nov 24 13:43:40 crc kubenswrapper[5039]: I1124 13:43:40.329631 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.233:8775/\": read tcp 10.217.0.2:47116->10.217.0.233:8775: read: connection reset by peer" Nov 24 13:43:40 crc kubenswrapper[5039]: I1124 13:43:40.330160 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.233:8775/\": read tcp 10.217.0.2:47114->10.217.0.233:8775: read: connection reset by peer" Nov 24 13:43:40 crc kubenswrapper[5039]: I1124 13:43:40.838209 5039 generic.go:334] "Generic (PLEG): container finished" podID="254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b" containerID="62bf54e687cfd6eea3f1d770802c144f1876aee8edafc03614fd2e61aa042c70" exitCode=0 Nov 24 13:43:40 crc kubenswrapper[5039]: I1124 13:43:40.838690 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b","Type":"ContainerDied","Data":"62bf54e687cfd6eea3f1d770802c144f1876aee8edafc03614fd2e61aa042c70"} Nov 24 13:43:40 crc kubenswrapper[5039]: I1124 13:43:40.847720 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.049721 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.219192 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-config-data\") pod \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\" (UID: \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\") " Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.219599 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-nova-metadata-tls-certs\") pod \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\" (UID: \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\") " Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.219654 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-logs\") pod \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\" (UID: \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\") " Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.219702 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-combined-ca-bundle\") pod \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\" (UID: \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\") " Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.219801 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hssm9\" (UniqueName: \"kubernetes.io/projected/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-kube-api-access-hssm9\") pod \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\" (UID: \"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b\") " Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.221382 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-logs" (OuterVolumeSpecName: "logs") pod "254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b" (UID: "254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.226151 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-kube-api-access-hssm9" (OuterVolumeSpecName: "kube-api-access-hssm9") pod "254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b" (UID: "254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b"). InnerVolumeSpecName "kube-api-access-hssm9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.267079 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b" (UID: "254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.311410 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b" (UID: "254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b"). 
InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.316167 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-config-data" (OuterVolumeSpecName: "config-data") pod "254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b" (UID: "254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.322889 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hssm9\" (UniqueName: \"kubernetes.io/projected/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-kube-api-access-hssm9\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.322948 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.322961 5039 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.322972 5039 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-logs\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.322981 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.638737 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.730350 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-combined-ca-bundle\") pod \"770f9659-47d8-4af2-bc52-18fa13b7d10e\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.730431 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-internal-tls-certs\") pod \"770f9659-47d8-4af2-bc52-18fa13b7d10e\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.730581 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/770f9659-47d8-4af2-bc52-18fa13b7d10e-logs\") pod \"770f9659-47d8-4af2-bc52-18fa13b7d10e\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.730751 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-public-tls-certs\") pod \"770f9659-47d8-4af2-bc52-18fa13b7d10e\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.731004 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/770f9659-47d8-4af2-bc52-18fa13b7d10e-logs" (OuterVolumeSpecName: "logs") pod "770f9659-47d8-4af2-bc52-18fa13b7d10e" (UID: "770f9659-47d8-4af2-bc52-18fa13b7d10e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.731085 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4zbdp\" (UniqueName: \"kubernetes.io/projected/770f9659-47d8-4af2-bc52-18fa13b7d10e-kube-api-access-4zbdp\") pod \"770f9659-47d8-4af2-bc52-18fa13b7d10e\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.731145 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-config-data\") pod \"770f9659-47d8-4af2-bc52-18fa13b7d10e\" (UID: \"770f9659-47d8-4af2-bc52-18fa13b7d10e\") " Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.731801 5039 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/770f9659-47d8-4af2-bc52-18fa13b7d10e-logs\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.734408 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/770f9659-47d8-4af2-bc52-18fa13b7d10e-kube-api-access-4zbdp" (OuterVolumeSpecName: "kube-api-access-4zbdp") pod "770f9659-47d8-4af2-bc52-18fa13b7d10e" (UID: "770f9659-47d8-4af2-bc52-18fa13b7d10e"). InnerVolumeSpecName "kube-api-access-4zbdp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.761648 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-config-data" (OuterVolumeSpecName: "config-data") pod "770f9659-47d8-4af2-bc52-18fa13b7d10e" (UID: "770f9659-47d8-4af2-bc52-18fa13b7d10e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.766731 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "770f9659-47d8-4af2-bc52-18fa13b7d10e" (UID: "770f9659-47d8-4af2-bc52-18fa13b7d10e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.783057 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "770f9659-47d8-4af2-bc52-18fa13b7d10e" (UID: "770f9659-47d8-4af2-bc52-18fa13b7d10e"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.789458 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "770f9659-47d8-4af2-bc52-18fa13b7d10e" (UID: "770f9659-47d8-4af2-bc52-18fa13b7d10e"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.833657 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.833703 5039 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.833716 5039 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.833731 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4zbdp\" (UniqueName: \"kubernetes.io/projected/770f9659-47d8-4af2-bc52-18fa13b7d10e-kube-api-access-4zbdp\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.833747 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/770f9659-47d8-4af2-bc52-18fa13b7d10e-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.858454 5039 generic.go:334] "Generic (PLEG): container finished" podID="770f9659-47d8-4af2-bc52-18fa13b7d10e" containerID="e31fa4c95041a53a027f913246669668ea738a07cd4fb3a7b164e47c0b054f2d" exitCode=0 Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.858492 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.858556 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"770f9659-47d8-4af2-bc52-18fa13b7d10e","Type":"ContainerDied","Data":"e31fa4c95041a53a027f913246669668ea738a07cd4fb3a7b164e47c0b054f2d"} Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.858587 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"770f9659-47d8-4af2-bc52-18fa13b7d10e","Type":"ContainerDied","Data":"65cba305f72f81f1cc0940694b179bc6f4cb2ce829b790c23a85a54e62a07865"} Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.858608 5039 scope.go:117] "RemoveContainer" containerID="e31fa4c95041a53a027f913246669668ea738a07cd4fb3a7b164e47c0b054f2d" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.862057 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.862890 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b","Type":"ContainerDied","Data":"b65e4237f91e86058d39ca99008d8feeed9c930dc27b068da5efd6d99433305e"} Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.866997 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"04160090-3eab-412c-a6e0-6946a44bcb81","Type":"ContainerStarted","Data":"371f5d20f9f479d3d3a60902815cb0a99dcf289b74c8cb882c1646b8c4f1d07a"} Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.867044 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"04160090-3eab-412c-a6e0-6946a44bcb81","Type":"ContainerStarted","Data":"243eb531974ddadf8883cf4635d69e14f0ded30eb0cbba0bad7b31d321832d33"} Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.895738 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.8957148200000002 podStartE2EDuration="2.89571482s" podCreationTimestamp="2025-11-24 13:43:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:43:41.885941841 +0000 UTC m=+1534.325066351" watchObservedRunningTime="2025-11-24 13:43:41.89571482 +0000 UTC m=+1534.334839320" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.941764 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.947844 5039 scope.go:117] "RemoveContainer" containerID="9668d8a8c6a8cc19fecb760b3a85686c868a613f96807f5b685bcc2aefe29046" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.956977 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.975124 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 24 13:43:41 crc kubenswrapper[5039]: E1124 13:43:41.977071 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b" containerName="nova-metadata-log" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.977103 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b" containerName="nova-metadata-log" Nov 24 13:43:41 crc kubenswrapper[5039]: E1124 13:43:41.977125 5039 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="770f9659-47d8-4af2-bc52-18fa13b7d10e" containerName="nova-api-api" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.977132 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="770f9659-47d8-4af2-bc52-18fa13b7d10e" containerName="nova-api-api" Nov 24 13:43:41 crc kubenswrapper[5039]: E1124 13:43:41.977155 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b" containerName="nova-metadata-metadata" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.977161 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b" containerName="nova-metadata-metadata" Nov 24 13:43:41 crc kubenswrapper[5039]: E1124 13:43:41.977181 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="770f9659-47d8-4af2-bc52-18fa13b7d10e" containerName="nova-api-log" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.977186 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="770f9659-47d8-4af2-bc52-18fa13b7d10e" containerName="nova-api-log" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.977375 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="770f9659-47d8-4af2-bc52-18fa13b7d10e" containerName="nova-api-api" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.977388 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="770f9659-47d8-4af2-bc52-18fa13b7d10e" containerName="nova-api-log" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.977398 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b" containerName="nova-metadata-log" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.977406 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b" containerName="nova-metadata-metadata" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.978491 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.981482 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.982012 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.984281 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.992369 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.997793 5039 scope.go:117] "RemoveContainer" containerID="e31fa4c95041a53a027f913246669668ea738a07cd4fb3a7b164e47c0b054f2d" Nov 24 13:43:41 crc kubenswrapper[5039]: E1124 13:43:41.998298 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e31fa4c95041a53a027f913246669668ea738a07cd4fb3a7b164e47c0b054f2d\": container with ID starting with e31fa4c95041a53a027f913246669668ea738a07cd4fb3a7b164e47c0b054f2d not found: ID does not exist" containerID="e31fa4c95041a53a027f913246669668ea738a07cd4fb3a7b164e47c0b054f2d" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.998325 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e31fa4c95041a53a027f913246669668ea738a07cd4fb3a7b164e47c0b054f2d"} err="failed to get container status \"e31fa4c95041a53a027f913246669668ea738a07cd4fb3a7b164e47c0b054f2d\": rpc error: code = NotFound desc = could not find container \"e31fa4c95041a53a027f913246669668ea738a07cd4fb3a7b164e47c0b054f2d\": container with ID starting with e31fa4c95041a53a027f913246669668ea738a07cd4fb3a7b164e47c0b054f2d not found: ID does not exist" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.998349 5039 scope.go:117] "RemoveContainer" containerID="9668d8a8c6a8cc19fecb760b3a85686c868a613f96807f5b685bcc2aefe29046" Nov 24 13:43:41 crc kubenswrapper[5039]: E1124 13:43:41.998529 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9668d8a8c6a8cc19fecb760b3a85686c868a613f96807f5b685bcc2aefe29046\": container with ID starting with 9668d8a8c6a8cc19fecb760b3a85686c868a613f96807f5b685bcc2aefe29046 not found: ID does not exist" containerID="9668d8a8c6a8cc19fecb760b3a85686c868a613f96807f5b685bcc2aefe29046" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.998550 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9668d8a8c6a8cc19fecb760b3a85686c868a613f96807f5b685bcc2aefe29046"} err="failed to get container status \"9668d8a8c6a8cc19fecb760b3a85686c868a613f96807f5b685bcc2aefe29046\": rpc error: code = NotFound desc = could not find container \"9668d8a8c6a8cc19fecb760b3a85686c868a613f96807f5b685bcc2aefe29046\": container with ID starting with 9668d8a8c6a8cc19fecb760b3a85686c868a613f96807f5b685bcc2aefe29046 not found: ID does not exist" Nov 24 13:43:41 crc kubenswrapper[5039]: I1124 13:43:41.998562 5039 scope.go:117] "RemoveContainer" containerID="62bf54e687cfd6eea3f1d770802c144f1876aee8edafc03614fd2e61aa042c70" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.007824 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 
13:43:42.037801 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.039678 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.045921 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.046179 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.051659 5039 scope.go:117] "RemoveContainer" containerID="5d79751868aa423bc2ee3b734b640e180feacd89ffec45885adaabc31ee4bee7" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.086580 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.142383 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2zbd\" (UniqueName: \"kubernetes.io/projected/42be8bb1-8823-4a1f-8777-348baedb7758-kube-api-access-l2zbd\") pod \"nova-api-0\" (UID: \"42be8bb1-8823-4a1f-8777-348baedb7758\") " pod="openstack/nova-api-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.142814 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42be8bb1-8823-4a1f-8777-348baedb7758-logs\") pod \"nova-api-0\" (UID: \"42be8bb1-8823-4a1f-8777-348baedb7758\") " pod="openstack/nova-api-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.142841 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42be8bb1-8823-4a1f-8777-348baedb7758-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"42be8bb1-8823-4a1f-8777-348baedb7758\") " pod="openstack/nova-api-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.142863 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42be8bb1-8823-4a1f-8777-348baedb7758-config-data\") pod \"nova-api-0\" (UID: \"42be8bb1-8823-4a1f-8777-348baedb7758\") " pod="openstack/nova-api-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.142893 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/42be8bb1-8823-4a1f-8777-348baedb7758-internal-tls-certs\") pod \"nova-api-0\" (UID: \"42be8bb1-8823-4a1f-8777-348baedb7758\") " pod="openstack/nova-api-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.142979 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c245130-8f33-4226-b312-9573746acd0f-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1c245130-8f33-4226-b312-9573746acd0f\") " pod="openstack/nova-metadata-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.143016 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c245130-8f33-4226-b312-9573746acd0f-config-data\") pod \"nova-metadata-0\" (UID: \"1c245130-8f33-4226-b312-9573746acd0f\") " 
pod="openstack/nova-metadata-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.143054 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/42be8bb1-8823-4a1f-8777-348baedb7758-public-tls-certs\") pod \"nova-api-0\" (UID: \"42be8bb1-8823-4a1f-8777-348baedb7758\") " pod="openstack/nova-api-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.143168 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c245130-8f33-4226-b312-9573746acd0f-logs\") pod \"nova-metadata-0\" (UID: \"1c245130-8f33-4226-b312-9573746acd0f\") " pod="openstack/nova-metadata-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.143223 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c245130-8f33-4226-b312-9573746acd0f-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1c245130-8f33-4226-b312-9573746acd0f\") " pod="openstack/nova-metadata-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.143260 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jq9j9\" (UniqueName: \"kubernetes.io/projected/1c245130-8f33-4226-b312-9573746acd0f-kube-api-access-jq9j9\") pod \"nova-metadata-0\" (UID: \"1c245130-8f33-4226-b312-9573746acd0f\") " pod="openstack/nova-metadata-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.144426 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.245165 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c245130-8f33-4226-b312-9573746acd0f-config-data\") pod \"nova-metadata-0\" (UID: \"1c245130-8f33-4226-b312-9573746acd0f\") " pod="openstack/nova-metadata-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.245223 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/42be8bb1-8823-4a1f-8777-348baedb7758-public-tls-certs\") pod \"nova-api-0\" (UID: \"42be8bb1-8823-4a1f-8777-348baedb7758\") " pod="openstack/nova-api-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.245306 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c245130-8f33-4226-b312-9573746acd0f-logs\") pod \"nova-metadata-0\" (UID: \"1c245130-8f33-4226-b312-9573746acd0f\") " pod="openstack/nova-metadata-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.245345 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c245130-8f33-4226-b312-9573746acd0f-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1c245130-8f33-4226-b312-9573746acd0f\") " pod="openstack/nova-metadata-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.245376 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jq9j9\" (UniqueName: \"kubernetes.io/projected/1c245130-8f33-4226-b312-9573746acd0f-kube-api-access-jq9j9\") pod \"nova-metadata-0\" (UID: \"1c245130-8f33-4226-b312-9573746acd0f\") " pod="openstack/nova-metadata-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.245415 
5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2zbd\" (UniqueName: \"kubernetes.io/projected/42be8bb1-8823-4a1f-8777-348baedb7758-kube-api-access-l2zbd\") pod \"nova-api-0\" (UID: \"42be8bb1-8823-4a1f-8777-348baedb7758\") " pod="openstack/nova-api-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.245471 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42be8bb1-8823-4a1f-8777-348baedb7758-logs\") pod \"nova-api-0\" (UID: \"42be8bb1-8823-4a1f-8777-348baedb7758\") " pod="openstack/nova-api-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.245654 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42be8bb1-8823-4a1f-8777-348baedb7758-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"42be8bb1-8823-4a1f-8777-348baedb7758\") " pod="openstack/nova-api-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.245697 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42be8bb1-8823-4a1f-8777-348baedb7758-config-data\") pod \"nova-api-0\" (UID: \"42be8bb1-8823-4a1f-8777-348baedb7758\") " pod="openstack/nova-api-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.245718 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/42be8bb1-8823-4a1f-8777-348baedb7758-internal-tls-certs\") pod \"nova-api-0\" (UID: \"42be8bb1-8823-4a1f-8777-348baedb7758\") " pod="openstack/nova-api-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.245784 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c245130-8f33-4226-b312-9573746acd0f-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1c245130-8f33-4226-b312-9573746acd0f\") " pod="openstack/nova-metadata-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.248974 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42be8bb1-8823-4a1f-8777-348baedb7758-logs\") pod \"nova-api-0\" (UID: \"42be8bb1-8823-4a1f-8777-348baedb7758\") " pod="openstack/nova-api-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.249276 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c245130-8f33-4226-b312-9573746acd0f-logs\") pod \"nova-metadata-0\" (UID: \"1c245130-8f33-4226-b312-9573746acd0f\") " pod="openstack/nova-metadata-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.250553 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c245130-8f33-4226-b312-9573746acd0f-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1c245130-8f33-4226-b312-9573746acd0f\") " pod="openstack/nova-metadata-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.255329 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c245130-8f33-4226-b312-9573746acd0f-config-data\") pod \"nova-metadata-0\" (UID: \"1c245130-8f33-4226-b312-9573746acd0f\") " pod="openstack/nova-metadata-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.261931 5039 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/42be8bb1-8823-4a1f-8777-348baedb7758-public-tls-certs\") pod \"nova-api-0\" (UID: \"42be8bb1-8823-4a1f-8777-348baedb7758\") " pod="openstack/nova-api-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.263658 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/42be8bb1-8823-4a1f-8777-348baedb7758-internal-tls-certs\") pod \"nova-api-0\" (UID: \"42be8bb1-8823-4a1f-8777-348baedb7758\") " pod="openstack/nova-api-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.264011 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42be8bb1-8823-4a1f-8777-348baedb7758-config-data\") pod \"nova-api-0\" (UID: \"42be8bb1-8823-4a1f-8777-348baedb7758\") " pod="openstack/nova-api-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.265871 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42be8bb1-8823-4a1f-8777-348baedb7758-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"42be8bb1-8823-4a1f-8777-348baedb7758\") " pod="openstack/nova-api-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.267127 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c245130-8f33-4226-b312-9573746acd0f-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1c245130-8f33-4226-b312-9573746acd0f\") " pod="openstack/nova-metadata-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.267549 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jq9j9\" (UniqueName: \"kubernetes.io/projected/1c245130-8f33-4226-b312-9573746acd0f-kube-api-access-jq9j9\") pod \"nova-metadata-0\" (UID: \"1c245130-8f33-4226-b312-9573746acd0f\") " pod="openstack/nova-metadata-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.271918 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2zbd\" (UniqueName: \"kubernetes.io/projected/42be8bb1-8823-4a1f-8777-348baedb7758-kube-api-access-l2zbd\") pod \"nova-api-0\" (UID: \"42be8bb1-8823-4a1f-8777-348baedb7758\") " pod="openstack/nova-api-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.307732 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.342740 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b" path="/var/lib/kubelet/pods/254edee5-b7e9-44a8-b5dc-8c68a7ec1d1b/volumes" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.343635 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="770f9659-47d8-4af2-bc52-18fa13b7d10e" path="/var/lib/kubelet/pods/770f9659-47d8-4af2-bc52-18fa13b7d10e/volumes" Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.366452 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 13:43:42 crc kubenswrapper[5039]: W1124 13:43:42.817803 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod42be8bb1_8823_4a1f_8777_348baedb7758.slice/crio-24b749625560f6683ea5c6fa027d15d0ed4fb4b775975860fed5f931c64c80af WatchSource:0}: Error finding container 24b749625560f6683ea5c6fa027d15d0ed4fb4b775975860fed5f931c64c80af: Status 404 returned error can't find the container with id 24b749625560f6683ea5c6fa027d15d0ed4fb4b775975860fed5f931c64c80af Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.822040 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.881608 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"42be8bb1-8823-4a1f-8777-348baedb7758","Type":"ContainerStarted","Data":"24b749625560f6683ea5c6fa027d15d0ed4fb4b775975860fed5f931c64c80af"} Nov 24 13:43:42 crc kubenswrapper[5039]: I1124 13:43:42.904342 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 13:43:42 crc kubenswrapper[5039]: W1124 13:43:42.905322 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c245130_8f33_4226_b312_9573746acd0f.slice/crio-d4bf9b56ea13502d9085fa7206be6b76759dd5b116dc2661016c24e8a37c1f61 WatchSource:0}: Error finding container d4bf9b56ea13502d9085fa7206be6b76759dd5b116dc2661016c24e8a37c1f61: Status 404 returned error can't find the container with id d4bf9b56ea13502d9085fa7206be6b76759dd5b116dc2661016c24e8a37c1f61 Nov 24 13:43:43 crc kubenswrapper[5039]: E1124 13:43:43.487772 5039 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a180464_260b_456c_bb96_c83c69cd2258.slice/crio-92bf6ee72ff68a7a7298ff2aef698fb4a54e003083f0b9360c1eb721ffab2693\": RecentStats: unable to find data in memory cache]" Nov 24 13:43:43 crc kubenswrapper[5039]: I1124 13:43:43.896869 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"42be8bb1-8823-4a1f-8777-348baedb7758","Type":"ContainerStarted","Data":"5142dd87bddab81c67269086ba7acbd6f5589ff998930f068db64825dbc3d731"} Nov 24 13:43:43 crc kubenswrapper[5039]: I1124 13:43:43.897638 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"42be8bb1-8823-4a1f-8777-348baedb7758","Type":"ContainerStarted","Data":"d131d655e1c763091c3c64911f031b2058363c8a4983722135a92e5a5b10cc05"} Nov 24 13:43:43 crc kubenswrapper[5039]: I1124 13:43:43.901313 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1c245130-8f33-4226-b312-9573746acd0f","Type":"ContainerStarted","Data":"f5b6c8fff93ed10f733a256967dfbfaff933f730043c475a474293aa43b6a771"} Nov 24 13:43:43 crc kubenswrapper[5039]: I1124 13:43:43.901492 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1c245130-8f33-4226-b312-9573746acd0f","Type":"ContainerStarted","Data":"ada40cae8c6124c31ff47bda3658ce14b26aa862a7ac08fff10d1dbc2affcb3e"} Nov 24 13:43:43 crc kubenswrapper[5039]: I1124 13:43:43.901598 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"1c245130-8f33-4226-b312-9573746acd0f","Type":"ContainerStarted","Data":"d4bf9b56ea13502d9085fa7206be6b76759dd5b116dc2661016c24e8a37c1f61"} Nov 24 13:43:43 crc kubenswrapper[5039]: I1124 13:43:43.942008 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.941974568 podStartE2EDuration="2.941974568s" podCreationTimestamp="2025-11-24 13:43:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:43:43.923552917 +0000 UTC m=+1536.362677447" watchObservedRunningTime="2025-11-24 13:43:43.941974568 +0000 UTC m=+1536.381099078" Nov 24 13:43:43 crc kubenswrapper[5039]: I1124 13:43:43.954722 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.954701269 podStartE2EDuration="2.954701269s" podCreationTimestamp="2025-11-24 13:43:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:43:43.940542662 +0000 UTC m=+1536.379667182" watchObservedRunningTime="2025-11-24 13:43:43.954701269 +0000 UTC m=+1536.393825779" Nov 24 13:43:45 crc kubenswrapper[5039]: I1124 13:43:45.269005 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 24 13:43:47 crc kubenswrapper[5039]: I1124 13:43:47.367689 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 24 13:43:47 crc kubenswrapper[5039]: I1124 13:43:47.367970 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 24 13:43:48 crc kubenswrapper[5039]: I1124 13:43:48.313088 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:43:48 crc kubenswrapper[5039]: E1124 13:43:48.313412 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:43:50 crc kubenswrapper[5039]: I1124 13:43:50.270098 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 24 13:43:50 crc kubenswrapper[5039]: I1124 13:43:50.322451 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 24 13:43:51 crc kubenswrapper[5039]: I1124 13:43:51.016306 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 24 13:43:52 crc kubenswrapper[5039]: I1124 13:43:52.319624 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 24 13:43:52 crc kubenswrapper[5039]: I1124 13:43:52.319676 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 24 13:43:52 crc kubenswrapper[5039]: I1124 13:43:52.367010 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 24 13:43:52 crc kubenswrapper[5039]: I1124 13:43:52.367049 5039 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 24 13:43:53 crc kubenswrapper[5039]: I1124 13:43:53.324785 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="42be8bb1-8823-4a1f-8777-348baedb7758" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.249:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 24 13:43:53 crc kubenswrapper[5039]: I1124 13:43:53.324815 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="42be8bb1-8823-4a1f-8777-348baedb7758" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.249:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 24 13:43:53 crc kubenswrapper[5039]: I1124 13:43:53.378822 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="1c245130-8f33-4226-b312-9573746acd0f" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.250:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 24 13:43:53 crc kubenswrapper[5039]: I1124 13:43:53.378863 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="1c245130-8f33-4226-b312-9573746acd0f" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.250:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 24 13:43:53 crc kubenswrapper[5039]: E1124 13:43:53.747321 5039 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a180464_260b_456c_bb96_c83c69cd2258.slice/crio-92bf6ee72ff68a7a7298ff2aef698fb4a54e003083f0b9360c1eb721ffab2693\": RecentStats: unable to find data in memory cache]" Nov 24 13:44:01 crc kubenswrapper[5039]: I1124 13:44:01.307668 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:44:01 crc kubenswrapper[5039]: E1124 13:44:01.308245 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:44:02 crc kubenswrapper[5039]: I1124 13:44:02.147858 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 24 13:44:02 crc kubenswrapper[5039]: I1124 13:44:02.324151 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 24 13:44:02 crc kubenswrapper[5039]: I1124 13:44:02.324642 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 24 13:44:02 crc kubenswrapper[5039]: I1124 13:44:02.324678 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 24 13:44:02 crc kubenswrapper[5039]: I1124 13:44:02.335672 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 24 13:44:02 crc kubenswrapper[5039]: I1124 13:44:02.371312 5039 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 24 13:44:02 crc kubenswrapper[5039]: I1124 13:44:02.372945 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 24 13:44:02 crc kubenswrapper[5039]: I1124 13:44:02.375950 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 24 13:44:03 crc kubenswrapper[5039]: I1124 13:44:03.119142 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 24 13:44:03 crc kubenswrapper[5039]: I1124 13:44:03.127919 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 24 13:44:03 crc kubenswrapper[5039]: I1124 13:44:03.129778 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 24 13:44:04 crc kubenswrapper[5039]: E1124 13:44:04.032544 5039 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a180464_260b_456c_bb96_c83c69cd2258.slice/crio-92bf6ee72ff68a7a7298ff2aef698fb4a54e003083f0b9360c1eb721ffab2693\": RecentStats: unable to find data in memory cache]" Nov 24 13:44:14 crc kubenswrapper[5039]: I1124 13:44:14.115037 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-qvrwf"] Nov 24 13:44:14 crc kubenswrapper[5039]: I1124 13:44:14.130454 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-qvrwf"] Nov 24 13:44:14 crc kubenswrapper[5039]: I1124 13:44:14.223523 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-bgfrt"] Nov 24 13:44:14 crc kubenswrapper[5039]: I1124 13:44:14.226085 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-bgfrt" Nov 24 13:44:14 crc kubenswrapper[5039]: I1124 13:44:14.239145 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-bgfrt"] Nov 24 13:44:14 crc kubenswrapper[5039]: I1124 13:44:14.272806 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0be1e28f-a5d0-4685-b76f-5e074a81fe93-combined-ca-bundle\") pod \"heat-db-sync-bgfrt\" (UID: \"0be1e28f-a5d0-4685-b76f-5e074a81fe93\") " pod="openstack/heat-db-sync-bgfrt" Nov 24 13:44:14 crc kubenswrapper[5039]: I1124 13:44:14.273077 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0be1e28f-a5d0-4685-b76f-5e074a81fe93-config-data\") pod \"heat-db-sync-bgfrt\" (UID: \"0be1e28f-a5d0-4685-b76f-5e074a81fe93\") " pod="openstack/heat-db-sync-bgfrt" Nov 24 13:44:14 crc kubenswrapper[5039]: I1124 13:44:14.273276 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7qm9\" (UniqueName: \"kubernetes.io/projected/0be1e28f-a5d0-4685-b76f-5e074a81fe93-kube-api-access-d7qm9\") pod \"heat-db-sync-bgfrt\" (UID: \"0be1e28f-a5d0-4685-b76f-5e074a81fe93\") " pod="openstack/heat-db-sync-bgfrt" Nov 24 13:44:14 crc kubenswrapper[5039]: I1124 13:44:14.319060 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c0313ce-4944-4fad-bce0-47d60b273f69" path="/var/lib/kubelet/pods/7c0313ce-4944-4fad-bce0-47d60b273f69/volumes" Nov 24 13:44:14 crc kubenswrapper[5039]: I1124 13:44:14.374863 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0be1e28f-a5d0-4685-b76f-5e074a81fe93-config-data\") pod \"heat-db-sync-bgfrt\" (UID: \"0be1e28f-a5d0-4685-b76f-5e074a81fe93\") " pod="openstack/heat-db-sync-bgfrt" Nov 24 13:44:14 crc kubenswrapper[5039]: I1124 13:44:14.375109 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7qm9\" (UniqueName: \"kubernetes.io/projected/0be1e28f-a5d0-4685-b76f-5e074a81fe93-kube-api-access-d7qm9\") pod \"heat-db-sync-bgfrt\" (UID: \"0be1e28f-a5d0-4685-b76f-5e074a81fe93\") " pod="openstack/heat-db-sync-bgfrt" Nov 24 13:44:14 crc kubenswrapper[5039]: I1124 13:44:14.376082 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0be1e28f-a5d0-4685-b76f-5e074a81fe93-combined-ca-bundle\") pod \"heat-db-sync-bgfrt\" (UID: \"0be1e28f-a5d0-4685-b76f-5e074a81fe93\") " pod="openstack/heat-db-sync-bgfrt" Nov 24 13:44:14 crc kubenswrapper[5039]: I1124 13:44:14.394065 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0be1e28f-a5d0-4685-b76f-5e074a81fe93-combined-ca-bundle\") pod \"heat-db-sync-bgfrt\" (UID: \"0be1e28f-a5d0-4685-b76f-5e074a81fe93\") " pod="openstack/heat-db-sync-bgfrt" Nov 24 13:44:14 crc kubenswrapper[5039]: I1124 13:44:14.394697 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0be1e28f-a5d0-4685-b76f-5e074a81fe93-config-data\") pod \"heat-db-sync-bgfrt\" (UID: \"0be1e28f-a5d0-4685-b76f-5e074a81fe93\") " pod="openstack/heat-db-sync-bgfrt" Nov 24 13:44:14 crc kubenswrapper[5039]: I1124 13:44:14.400950 5039 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7qm9\" (UniqueName: \"kubernetes.io/projected/0be1e28f-a5d0-4685-b76f-5e074a81fe93-kube-api-access-d7qm9\") pod \"heat-db-sync-bgfrt\" (UID: \"0be1e28f-a5d0-4685-b76f-5e074a81fe93\") " pod="openstack/heat-db-sync-bgfrt" Nov 24 13:44:14 crc kubenswrapper[5039]: I1124 13:44:14.572129 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-bgfrt" Nov 24 13:44:15 crc kubenswrapper[5039]: I1124 13:44:15.046268 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-bgfrt"] Nov 24 13:44:15 crc kubenswrapper[5039]: I1124 13:44:15.268053 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-bgfrt" event={"ID":"0be1e28f-a5d0-4685-b76f-5e074a81fe93","Type":"ContainerStarted","Data":"abd437b055840bb712aabd8d4db791999c9c6070e8daad1582d4bcd2318c8082"} Nov 24 13:44:15 crc kubenswrapper[5039]: I1124 13:44:15.309477 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:44:15 crc kubenswrapper[5039]: E1124 13:44:15.310087 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:44:16 crc kubenswrapper[5039]: I1124 13:44:16.375998 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:44:16 crc kubenswrapper[5039]: I1124 13:44:16.376771 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="817cea2c-2542-4f9c-8a3f-7ff06f359112" containerName="proxy-httpd" containerID="cri-o://3d4b495984ba1d5382ba65e0df5ecb31574a09eed59fa445267d24869f8f2b5c" gracePeriod=30 Nov 24 13:44:16 crc kubenswrapper[5039]: I1124 13:44:16.376808 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="817cea2c-2542-4f9c-8a3f-7ff06f359112" containerName="sg-core" containerID="cri-o://91e400ca05181715fa556a024bf2780ac2d4d9d63f6d11873a8a7800988ed86c" gracePeriod=30 Nov 24 13:44:16 crc kubenswrapper[5039]: I1124 13:44:16.376787 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="817cea2c-2542-4f9c-8a3f-7ff06f359112" containerName="ceilometer-central-agent" containerID="cri-o://5b1d821ad540544a5e9ae893157806608c71bb04c9e5e6b334a7dd2ed168de4d" gracePeriod=30 Nov 24 13:44:16 crc kubenswrapper[5039]: I1124 13:44:16.376967 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="817cea2c-2542-4f9c-8a3f-7ff06f359112" containerName="ceilometer-notification-agent" containerID="cri-o://6bf2f764b1d8cdacc5168f586602beae4dec8e5b2471541ab8fb1c13bc5a9e5a" gracePeriod=30 Nov 24 13:44:16 crc kubenswrapper[5039]: I1124 13:44:16.850735 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 13:44:17 crc kubenswrapper[5039]: I1124 13:44:17.298082 5039 generic.go:334] "Generic (PLEG): container finished" podID="817cea2c-2542-4f9c-8a3f-7ff06f359112" 
containerID="3d4b495984ba1d5382ba65e0df5ecb31574a09eed59fa445267d24869f8f2b5c" exitCode=0 Nov 24 13:44:17 crc kubenswrapper[5039]: I1124 13:44:17.298457 5039 generic.go:334] "Generic (PLEG): container finished" podID="817cea2c-2542-4f9c-8a3f-7ff06f359112" containerID="91e400ca05181715fa556a024bf2780ac2d4d9d63f6d11873a8a7800988ed86c" exitCode=2 Nov 24 13:44:17 crc kubenswrapper[5039]: I1124 13:44:17.298467 5039 generic.go:334] "Generic (PLEG): container finished" podID="817cea2c-2542-4f9c-8a3f-7ff06f359112" containerID="5b1d821ad540544a5e9ae893157806608c71bb04c9e5e6b334a7dd2ed168de4d" exitCode=0 Nov 24 13:44:17 crc kubenswrapper[5039]: I1124 13:44:17.298267 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"817cea2c-2542-4f9c-8a3f-7ff06f359112","Type":"ContainerDied","Data":"3d4b495984ba1d5382ba65e0df5ecb31574a09eed59fa445267d24869f8f2b5c"} Nov 24 13:44:17 crc kubenswrapper[5039]: I1124 13:44:17.298527 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"817cea2c-2542-4f9c-8a3f-7ff06f359112","Type":"ContainerDied","Data":"91e400ca05181715fa556a024bf2780ac2d4d9d63f6d11873a8a7800988ed86c"} Nov 24 13:44:17 crc kubenswrapper[5039]: I1124 13:44:17.298544 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"817cea2c-2542-4f9c-8a3f-7ff06f359112","Type":"ContainerDied","Data":"5b1d821ad540544a5e9ae893157806608c71bb04c9e5e6b334a7dd2ed168de4d"} Nov 24 13:44:18 crc kubenswrapper[5039]: I1124 13:44:18.145576 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.017456 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.189655 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-config-data\") pod \"817cea2c-2542-4f9c-8a3f-7ff06f359112\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.189727 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-ceilometer-tls-certs\") pod \"817cea2c-2542-4f9c-8a3f-7ff06f359112\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.189906 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-sg-core-conf-yaml\") pod \"817cea2c-2542-4f9c-8a3f-7ff06f359112\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.189941 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-scripts\") pod \"817cea2c-2542-4f9c-8a3f-7ff06f359112\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.189980 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-combined-ca-bundle\") pod \"817cea2c-2542-4f9c-8a3f-7ff06f359112\" (UID: 
\"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.190042 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/817cea2c-2542-4f9c-8a3f-7ff06f359112-run-httpd\") pod \"817cea2c-2542-4f9c-8a3f-7ff06f359112\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.190379 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/817cea2c-2542-4f9c-8a3f-7ff06f359112-log-httpd\") pod \"817cea2c-2542-4f9c-8a3f-7ff06f359112\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.190429 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w29zl\" (UniqueName: \"kubernetes.io/projected/817cea2c-2542-4f9c-8a3f-7ff06f359112-kube-api-access-w29zl\") pod \"817cea2c-2542-4f9c-8a3f-7ff06f359112\" (UID: \"817cea2c-2542-4f9c-8a3f-7ff06f359112\") " Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.190676 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/817cea2c-2542-4f9c-8a3f-7ff06f359112-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "817cea2c-2542-4f9c-8a3f-7ff06f359112" (UID: "817cea2c-2542-4f9c-8a3f-7ff06f359112"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.190900 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/817cea2c-2542-4f9c-8a3f-7ff06f359112-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "817cea2c-2542-4f9c-8a3f-7ff06f359112" (UID: "817cea2c-2542-4f9c-8a3f-7ff06f359112"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.191375 5039 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/817cea2c-2542-4f9c-8a3f-7ff06f359112-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.191397 5039 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/817cea2c-2542-4f9c-8a3f-7ff06f359112-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.209274 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/817cea2c-2542-4f9c-8a3f-7ff06f359112-kube-api-access-w29zl" (OuterVolumeSpecName: "kube-api-access-w29zl") pod "817cea2c-2542-4f9c-8a3f-7ff06f359112" (UID: "817cea2c-2542-4f9c-8a3f-7ff06f359112"). InnerVolumeSpecName "kube-api-access-w29zl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.218857 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-scripts" (OuterVolumeSpecName: "scripts") pod "817cea2c-2542-4f9c-8a3f-7ff06f359112" (UID: "817cea2c-2542-4f9c-8a3f-7ff06f359112"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.234222 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "817cea2c-2542-4f9c-8a3f-7ff06f359112" (UID: "817cea2c-2542-4f9c-8a3f-7ff06f359112"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.299383 5039 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.299415 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.299424 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w29zl\" (UniqueName: \"kubernetes.io/projected/817cea2c-2542-4f9c-8a3f-7ff06f359112-kube-api-access-w29zl\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.302737 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "817cea2c-2542-4f9c-8a3f-7ff06f359112" (UID: "817cea2c-2542-4f9c-8a3f-7ff06f359112"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.329256 5039 generic.go:334] "Generic (PLEG): container finished" podID="817cea2c-2542-4f9c-8a3f-7ff06f359112" containerID="6bf2f764b1d8cdacc5168f586602beae4dec8e5b2471541ab8fb1c13bc5a9e5a" exitCode=0 Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.329631 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"817cea2c-2542-4f9c-8a3f-7ff06f359112","Type":"ContainerDied","Data":"6bf2f764b1d8cdacc5168f586602beae4dec8e5b2471541ab8fb1c13bc5a9e5a"} Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.329735 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.329887 5039 scope.go:117] "RemoveContainer" containerID="3d4b495984ba1d5382ba65e0df5ecb31574a09eed59fa445267d24869f8f2b5c" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.329734 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"817cea2c-2542-4f9c-8a3f-7ff06f359112","Type":"ContainerDied","Data":"0edaff7fe8006e5bbec10c359f2655471597205172bad9d44820bb2045563586"} Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.335759 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "817cea2c-2542-4f9c-8a3f-7ff06f359112" (UID: "817cea2c-2542-4f9c-8a3f-7ff06f359112"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.357390 5039 scope.go:117] "RemoveContainer" containerID="91e400ca05181715fa556a024bf2780ac2d4d9d63f6d11873a8a7800988ed86c" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.362144 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-config-data" (OuterVolumeSpecName: "config-data") pod "817cea2c-2542-4f9c-8a3f-7ff06f359112" (UID: "817cea2c-2542-4f9c-8a3f-7ff06f359112"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.401643 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.401682 5039 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.401697 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/817cea2c-2542-4f9c-8a3f-7ff06f359112-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.410288 5039 scope.go:117] "RemoveContainer" containerID="6bf2f764b1d8cdacc5168f586602beae4dec8e5b2471541ab8fb1c13bc5a9e5a" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.476178 5039 scope.go:117] "RemoveContainer" containerID="5b1d821ad540544a5e9ae893157806608c71bb04c9e5e6b334a7dd2ed168de4d" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.547951 5039 scope.go:117] "RemoveContainer" containerID="3d4b495984ba1d5382ba65e0df5ecb31574a09eed59fa445267d24869f8f2b5c" Nov 24 13:44:19 crc kubenswrapper[5039]: E1124 13:44:19.548578 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d4b495984ba1d5382ba65e0df5ecb31574a09eed59fa445267d24869f8f2b5c\": container with ID starting with 3d4b495984ba1d5382ba65e0df5ecb31574a09eed59fa445267d24869f8f2b5c not found: ID does not exist" containerID="3d4b495984ba1d5382ba65e0df5ecb31574a09eed59fa445267d24869f8f2b5c" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.548606 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d4b495984ba1d5382ba65e0df5ecb31574a09eed59fa445267d24869f8f2b5c"} err="failed to get container status \"3d4b495984ba1d5382ba65e0df5ecb31574a09eed59fa445267d24869f8f2b5c\": rpc error: code = NotFound desc = could not find container \"3d4b495984ba1d5382ba65e0df5ecb31574a09eed59fa445267d24869f8f2b5c\": container with ID starting with 3d4b495984ba1d5382ba65e0df5ecb31574a09eed59fa445267d24869f8f2b5c not found: ID does not exist" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.548629 5039 scope.go:117] "RemoveContainer" containerID="91e400ca05181715fa556a024bf2780ac2d4d9d63f6d11873a8a7800988ed86c" Nov 24 13:44:19 crc kubenswrapper[5039]: E1124 13:44:19.548926 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91e400ca05181715fa556a024bf2780ac2d4d9d63f6d11873a8a7800988ed86c\": container with ID starting with 
91e400ca05181715fa556a024bf2780ac2d4d9d63f6d11873a8a7800988ed86c not found: ID does not exist" containerID="91e400ca05181715fa556a024bf2780ac2d4d9d63f6d11873a8a7800988ed86c" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.548960 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91e400ca05181715fa556a024bf2780ac2d4d9d63f6d11873a8a7800988ed86c"} err="failed to get container status \"91e400ca05181715fa556a024bf2780ac2d4d9d63f6d11873a8a7800988ed86c\": rpc error: code = NotFound desc = could not find container \"91e400ca05181715fa556a024bf2780ac2d4d9d63f6d11873a8a7800988ed86c\": container with ID starting with 91e400ca05181715fa556a024bf2780ac2d4d9d63f6d11873a8a7800988ed86c not found: ID does not exist" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.548985 5039 scope.go:117] "RemoveContainer" containerID="6bf2f764b1d8cdacc5168f586602beae4dec8e5b2471541ab8fb1c13bc5a9e5a" Nov 24 13:44:19 crc kubenswrapper[5039]: E1124 13:44:19.549320 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6bf2f764b1d8cdacc5168f586602beae4dec8e5b2471541ab8fb1c13bc5a9e5a\": container with ID starting with 6bf2f764b1d8cdacc5168f586602beae4dec8e5b2471541ab8fb1c13bc5a9e5a not found: ID does not exist" containerID="6bf2f764b1d8cdacc5168f586602beae4dec8e5b2471541ab8fb1c13bc5a9e5a" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.549344 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6bf2f764b1d8cdacc5168f586602beae4dec8e5b2471541ab8fb1c13bc5a9e5a"} err="failed to get container status \"6bf2f764b1d8cdacc5168f586602beae4dec8e5b2471541ab8fb1c13bc5a9e5a\": rpc error: code = NotFound desc = could not find container \"6bf2f764b1d8cdacc5168f586602beae4dec8e5b2471541ab8fb1c13bc5a9e5a\": container with ID starting with 6bf2f764b1d8cdacc5168f586602beae4dec8e5b2471541ab8fb1c13bc5a9e5a not found: ID does not exist" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.549361 5039 scope.go:117] "RemoveContainer" containerID="5b1d821ad540544a5e9ae893157806608c71bb04c9e5e6b334a7dd2ed168de4d" Nov 24 13:44:19 crc kubenswrapper[5039]: E1124 13:44:19.549766 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b1d821ad540544a5e9ae893157806608c71bb04c9e5e6b334a7dd2ed168de4d\": container with ID starting with 5b1d821ad540544a5e9ae893157806608c71bb04c9e5e6b334a7dd2ed168de4d not found: ID does not exist" containerID="5b1d821ad540544a5e9ae893157806608c71bb04c9e5e6b334a7dd2ed168de4d" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.549785 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b1d821ad540544a5e9ae893157806608c71bb04c9e5e6b334a7dd2ed168de4d"} err="failed to get container status \"5b1d821ad540544a5e9ae893157806608c71bb04c9e5e6b334a7dd2ed168de4d\": rpc error: code = NotFound desc = could not find container \"5b1d821ad540544a5e9ae893157806608c71bb04c9e5e6b334a7dd2ed168de4d\": container with ID starting with 5b1d821ad540544a5e9ae893157806608c71bb04c9e5e6b334a7dd2ed168de4d not found: ID does not exist" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.677581 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.703882 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:44:19 
crc kubenswrapper[5039]: I1124 13:44:19.732794 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:44:19 crc kubenswrapper[5039]: E1124 13:44:19.733325 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="817cea2c-2542-4f9c-8a3f-7ff06f359112" containerName="sg-core" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.733347 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="817cea2c-2542-4f9c-8a3f-7ff06f359112" containerName="sg-core" Nov 24 13:44:19 crc kubenswrapper[5039]: E1124 13:44:19.733360 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="817cea2c-2542-4f9c-8a3f-7ff06f359112" containerName="proxy-httpd" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.733368 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="817cea2c-2542-4f9c-8a3f-7ff06f359112" containerName="proxy-httpd" Nov 24 13:44:19 crc kubenswrapper[5039]: E1124 13:44:19.733377 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="817cea2c-2542-4f9c-8a3f-7ff06f359112" containerName="ceilometer-notification-agent" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.733384 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="817cea2c-2542-4f9c-8a3f-7ff06f359112" containerName="ceilometer-notification-agent" Nov 24 13:44:19 crc kubenswrapper[5039]: E1124 13:44:19.733415 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="817cea2c-2542-4f9c-8a3f-7ff06f359112" containerName="ceilometer-central-agent" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.733422 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="817cea2c-2542-4f9c-8a3f-7ff06f359112" containerName="ceilometer-central-agent" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.733676 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="817cea2c-2542-4f9c-8a3f-7ff06f359112" containerName="ceilometer-notification-agent" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.733697 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="817cea2c-2542-4f9c-8a3f-7ff06f359112" containerName="proxy-httpd" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.733719 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="817cea2c-2542-4f9c-8a3f-7ff06f359112" containerName="ceilometer-central-agent" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.733732 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="817cea2c-2542-4f9c-8a3f-7ff06f359112" containerName="sg-core" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.736128 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.742935 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.746666 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.746888 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.747038 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.830845 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-config-data\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.831067 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.831329 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/831e28f3-74a0-4b52-933c-1a3e7a7811f6-log-httpd\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.831378 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-scripts\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.831413 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.831584 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/831e28f3-74a0-4b52-933c-1a3e7a7811f6-run-httpd\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.831698 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhg7n\" (UniqueName: \"kubernetes.io/projected/831e28f3-74a0-4b52-933c-1a3e7a7811f6-kube-api-access-bhg7n\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.831827 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.933884 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/831e28f3-74a0-4b52-933c-1a3e7a7811f6-log-httpd\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.933927 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-scripts\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.933944 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.934000 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/831e28f3-74a0-4b52-933c-1a3e7a7811f6-run-httpd\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.934065 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhg7n\" (UniqueName: \"kubernetes.io/projected/831e28f3-74a0-4b52-933c-1a3e7a7811f6-kube-api-access-bhg7n\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.934192 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.934253 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-config-data\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.934450 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/831e28f3-74a0-4b52-933c-1a3e7a7811f6-log-httpd\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.934561 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/831e28f3-74a0-4b52-933c-1a3e7a7811f6-run-httpd\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.934654 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.939829 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-config-data\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.942887 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.943585 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-scripts\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.946976 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.958265 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:19 crc kubenswrapper[5039]: I1124 13:44:19.977992 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhg7n\" (UniqueName: \"kubernetes.io/projected/831e28f3-74a0-4b52-933c-1a3e7a7811f6-kube-api-access-bhg7n\") pod \"ceilometer-0\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " pod="openstack/ceilometer-0" Nov 24 13:44:20 crc kubenswrapper[5039]: I1124 13:44:20.061102 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 13:44:20 crc kubenswrapper[5039]: I1124 13:44:20.329360 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="817cea2c-2542-4f9c-8a3f-7ff06f359112" path="/var/lib/kubelet/pods/817cea2c-2542-4f9c-8a3f-7ff06f359112/volumes" Nov 24 13:44:20 crc kubenswrapper[5039]: I1124 13:44:20.637272 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 13:44:21 crc kubenswrapper[5039]: I1124 13:44:21.386044 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"831e28f3-74a0-4b52-933c-1a3e7a7811f6","Type":"ContainerStarted","Data":"e3513345df51b6d01f88fab933a5b16e8579882d069408fa15fc49b9fa157ec5"} Nov 24 13:44:21 crc kubenswrapper[5039]: I1124 13:44:21.562110 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="6808fd4e-3718-430c-87e8-ca3e801a8248" containerName="rabbitmq" containerID="cri-o://56d5a0d96539de067af8cdca011d9daf93150bc8e470750cbef39cb456bf330d" gracePeriod=604796 Nov 24 13:44:22 crc kubenswrapper[5039]: I1124 13:44:22.850162 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="8e2e73c0-db1d-45e0-b056-0ed13bdbb904" containerName="rabbitmq" containerID="cri-o://3ee43750eeed8c71cd0a28ca11d7eb172974b9ffbe34600ac7ee2219070b935b" gracePeriod=604796 Nov 24 13:44:27 crc kubenswrapper[5039]: I1124 13:44:27.307478 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:44:27 crc kubenswrapper[5039]: E1124 13:44:27.308131 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:44:27 crc kubenswrapper[5039]: I1124 13:44:27.563969 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="6808fd4e-3718-430c-87e8-ca3e801a8248" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.122:5671: connect: connection refused" Nov 24 13:44:27 crc kubenswrapper[5039]: I1124 13:44:27.980147 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="8e2e73c0-db1d-45e0-b056-0ed13bdbb904" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.123:5671: connect: connection refused" Nov 24 13:44:28 crc kubenswrapper[5039]: I1124 13:44:28.474690 5039 generic.go:334] "Generic (PLEG): container finished" podID="6808fd4e-3718-430c-87e8-ca3e801a8248" containerID="56d5a0d96539de067af8cdca011d9daf93150bc8e470750cbef39cb456bf330d" exitCode=0 Nov 24 13:44:28 crc kubenswrapper[5039]: I1124 13:44:28.474770 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"6808fd4e-3718-430c-87e8-ca3e801a8248","Type":"ContainerDied","Data":"56d5a0d96539de067af8cdca011d9daf93150bc8e470750cbef39cb456bf330d"} Nov 24 13:44:29 crc kubenswrapper[5039]: I1124 13:44:29.500978 5039 generic.go:334] "Generic (PLEG): container finished" podID="8e2e73c0-db1d-45e0-b056-0ed13bdbb904" 
containerID="3ee43750eeed8c71cd0a28ca11d7eb172974b9ffbe34600ac7ee2219070b935b" exitCode=0 Nov 24 13:44:29 crc kubenswrapper[5039]: I1124 13:44:29.501044 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"8e2e73c0-db1d-45e0-b056-0ed13bdbb904","Type":"ContainerDied","Data":"3ee43750eeed8c71cd0a28ca11d7eb172974b9ffbe34600ac7ee2219070b935b"} Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.527279 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-68df85789f-wdmkf"] Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.529392 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.533254 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.609468 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-wdmkf"] Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.617822 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.617874 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-dns-svc\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.617932 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.617962 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.617992 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-config\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.618041 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpkq7\" (UniqueName: \"kubernetes.io/projected/3fd60732-1a85-4f00-88f8-5db8885ff122-kube-api-access-mpkq7\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc 
kubenswrapper[5039]: I1124 13:44:32.618081 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.720404 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpkq7\" (UniqueName: \"kubernetes.io/projected/3fd60732-1a85-4f00-88f8-5db8885ff122-kube-api-access-mpkq7\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.720560 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.720655 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.720700 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-dns-svc\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.720817 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.720881 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.720950 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-config\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.721664 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 
13:44:32.721982 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-dns-svc\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.722010 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.722045 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.722603 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.723704 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-config\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.742965 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpkq7\" (UniqueName: \"kubernetes.io/projected/3fd60732-1a85-4f00-88f8-5db8885ff122-kube-api-access-mpkq7\") pod \"dnsmasq-dns-68df85789f-wdmkf\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:32 crc kubenswrapper[5039]: I1124 13:44:32.857097 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.346717 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.537845 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6808fd4e-3718-430c-87e8-ca3e801a8248-erlang-cookie-secret\") pod \"6808fd4e-3718-430c-87e8-ca3e801a8248\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.538040 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6808fd4e-3718-430c-87e8-ca3e801a8248-server-conf\") pod \"6808fd4e-3718-430c-87e8-ca3e801a8248\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.538087 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-erlang-cookie\") pod \"6808fd4e-3718-430c-87e8-ca3e801a8248\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.538165 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjp8k\" (UniqueName: \"kubernetes.io/projected/6808fd4e-3718-430c-87e8-ca3e801a8248-kube-api-access-sjp8k\") pod \"6808fd4e-3718-430c-87e8-ca3e801a8248\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.538253 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-tls\") pod \"6808fd4e-3718-430c-87e8-ca3e801a8248\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.538292 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-confd\") pod \"6808fd4e-3718-430c-87e8-ca3e801a8248\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.538364 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"6808fd4e-3718-430c-87e8-ca3e801a8248\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.538397 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6808fd4e-3718-430c-87e8-ca3e801a8248-plugins-conf\") pod \"6808fd4e-3718-430c-87e8-ca3e801a8248\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.538447 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6808fd4e-3718-430c-87e8-ca3e801a8248-config-data\") pod \"6808fd4e-3718-430c-87e8-ca3e801a8248\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.538489 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-plugins\") pod \"6808fd4e-3718-430c-87e8-ca3e801a8248\" (UID: 
\"6808fd4e-3718-430c-87e8-ca3e801a8248\") " Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.538539 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6808fd4e-3718-430c-87e8-ca3e801a8248-pod-info\") pod \"6808fd4e-3718-430c-87e8-ca3e801a8248\" (UID: \"6808fd4e-3718-430c-87e8-ca3e801a8248\") " Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.573177 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "persistence") pod "6808fd4e-3718-430c-87e8-ca3e801a8248" (UID: "6808fd4e-3718-430c-87e8-ca3e801a8248"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.585351 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "6808fd4e-3718-430c-87e8-ca3e801a8248" (UID: "6808fd4e-3718-430c-87e8-ca3e801a8248"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.602856 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"6808fd4e-3718-430c-87e8-ca3e801a8248","Type":"ContainerDied","Data":"f39a2a8966d7dbe143d746bd2913e732bf9eae7c489004c818e692d0ff9707c0"} Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.602913 5039 scope.go:117] "RemoveContainer" containerID="56d5a0d96539de067af8cdca011d9daf93150bc8e470750cbef39cb456bf330d" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.603095 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.608490 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6808fd4e-3718-430c-87e8-ca3e801a8248-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "6808fd4e-3718-430c-87e8-ca3e801a8248" (UID: "6808fd4e-3718-430c-87e8-ca3e801a8248"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.612343 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6808fd4e-3718-430c-87e8-ca3e801a8248-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "6808fd4e-3718-430c-87e8-ca3e801a8248" (UID: "6808fd4e-3718-430c-87e8-ca3e801a8248"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.650074 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "6808fd4e-3718-430c-87e8-ca3e801a8248" (UID: "6808fd4e-3718-430c-87e8-ca3e801a8248"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.653306 5039 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.654576 5039 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6808fd4e-3718-430c-87e8-ca3e801a8248-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.655140 5039 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6808fd4e-3718-430c-87e8-ca3e801a8248-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.655235 5039 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.655295 5039 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.682686 5039 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.685441 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "6808fd4e-3718-430c-87e8-ca3e801a8248" (UID: "6808fd4e-3718-430c-87e8-ca3e801a8248"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.685867 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/6808fd4e-3718-430c-87e8-ca3e801a8248-pod-info" (OuterVolumeSpecName: "pod-info") pod "6808fd4e-3718-430c-87e8-ca3e801a8248" (UID: "6808fd4e-3718-430c-87e8-ca3e801a8248"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.686006 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6808fd4e-3718-430c-87e8-ca3e801a8248-config-data" (OuterVolumeSpecName: "config-data") pod "6808fd4e-3718-430c-87e8-ca3e801a8248" (UID: "6808fd4e-3718-430c-87e8-ca3e801a8248"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.687120 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6808fd4e-3718-430c-87e8-ca3e801a8248-server-conf" (OuterVolumeSpecName: "server-conf") pod "6808fd4e-3718-430c-87e8-ca3e801a8248" (UID: "6808fd4e-3718-430c-87e8-ca3e801a8248"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.689996 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6808fd4e-3718-430c-87e8-ca3e801a8248-kube-api-access-sjp8k" (OuterVolumeSpecName: "kube-api-access-sjp8k") pod "6808fd4e-3718-430c-87e8-ca3e801a8248" (UID: "6808fd4e-3718-430c-87e8-ca3e801a8248"). InnerVolumeSpecName "kube-api-access-sjp8k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.758141 5039 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6808fd4e-3718-430c-87e8-ca3e801a8248-server-conf\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.758187 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjp8k\" (UniqueName: \"kubernetes.io/projected/6808fd4e-3718-430c-87e8-ca3e801a8248-kube-api-access-sjp8k\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.758202 5039 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.758212 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6808fd4e-3718-430c-87e8-ca3e801a8248-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.758223 5039 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.758233 5039 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6808fd4e-3718-430c-87e8-ca3e801a8248-pod-info\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.799392 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "6808fd4e-3718-430c-87e8-ca3e801a8248" (UID: "6808fd4e-3718-430c-87e8-ca3e801a8248"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.859660 5039 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6808fd4e-3718-430c-87e8-ca3e801a8248-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:33 crc kubenswrapper[5039]: I1124 13:44:33.989924 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.015815 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.039647 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 13:44:34 crc kubenswrapper[5039]: E1124 13:44:34.040566 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6808fd4e-3718-430c-87e8-ca3e801a8248" containerName="setup-container" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.040582 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="6808fd4e-3718-430c-87e8-ca3e801a8248" containerName="setup-container" Nov 24 13:44:34 crc kubenswrapper[5039]: E1124 13:44:34.040612 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6808fd4e-3718-430c-87e8-ca3e801a8248" containerName="rabbitmq" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.040621 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="6808fd4e-3718-430c-87e8-ca3e801a8248" containerName="rabbitmq" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.040945 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="6808fd4e-3718-430c-87e8-ca3e801a8248" containerName="rabbitmq" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.042715 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.045455 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-5pt4p" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.045849 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.046011 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.046167 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.046304 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.046485 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.046683 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.059060 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.168032 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b820e90e-779c-4300-b0e0-affe5118e73f-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.168121 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b820e90e-779c-4300-b0e0-affe5118e73f-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.168161 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b820e90e-779c-4300-b0e0-affe5118e73f-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.168203 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b820e90e-779c-4300-b0e0-affe5118e73f-config-data\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.168219 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b820e90e-779c-4300-b0e0-affe5118e73f-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.168239 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod 
\"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.168269 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b820e90e-779c-4300-b0e0-affe5118e73f-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.168314 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b820e90e-779c-4300-b0e0-affe5118e73f-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.168336 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftk4h\" (UniqueName: \"kubernetes.io/projected/b820e90e-779c-4300-b0e0-affe5118e73f-kube-api-access-ftk4h\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.168373 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b820e90e-779c-4300-b0e0-affe5118e73f-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.168402 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b820e90e-779c-4300-b0e0-affe5118e73f-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.270403 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b820e90e-779c-4300-b0e0-affe5118e73f-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.270463 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b820e90e-779c-4300-b0e0-affe5118e73f-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.270535 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b820e90e-779c-4300-b0e0-affe5118e73f-config-data\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.270554 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b820e90e-779c-4300-b0e0-affe5118e73f-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc 
kubenswrapper[5039]: I1124 13:44:34.270579 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.270614 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b820e90e-779c-4300-b0e0-affe5118e73f-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.270673 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b820e90e-779c-4300-b0e0-affe5118e73f-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.270703 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftk4h\" (UniqueName: \"kubernetes.io/projected/b820e90e-779c-4300-b0e0-affe5118e73f-kube-api-access-ftk4h\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.270741 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b820e90e-779c-4300-b0e0-affe5118e73f-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.270767 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b820e90e-779c-4300-b0e0-affe5118e73f-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.270818 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b820e90e-779c-4300-b0e0-affe5118e73f-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.271635 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b820e90e-779c-4300-b0e0-affe5118e73f-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.271701 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b820e90e-779c-4300-b0e0-affe5118e73f-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.271860 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b820e90e-779c-4300-b0e0-affe5118e73f-server-conf\") pod \"rabbitmq-server-0\" 
(UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.271904 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b820e90e-779c-4300-b0e0-affe5118e73f-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.272216 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.272564 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b820e90e-779c-4300-b0e0-affe5118e73f-config-data\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.276254 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b820e90e-779c-4300-b0e0-affe5118e73f-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.278381 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b820e90e-779c-4300-b0e0-affe5118e73f-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.285314 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b820e90e-779c-4300-b0e0-affe5118e73f-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.287466 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b820e90e-779c-4300-b0e0-affe5118e73f-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.289041 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftk4h\" (UniqueName: \"kubernetes.io/projected/b820e90e-779c-4300-b0e0-affe5118e73f-kube-api-access-ftk4h\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.327902 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6808fd4e-3718-430c-87e8-ca3e801a8248" path="/var/lib/kubelet/pods/6808fd4e-3718-430c-87e8-ca3e801a8248/volumes" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.353248 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"b820e90e-779c-4300-b0e0-affe5118e73f\") " 
pod="openstack/rabbitmq-server-0" Nov 24 13:44:34 crc kubenswrapper[5039]: I1124 13:44:34.378327 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.631917 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"8e2e73c0-db1d-45e0-b056-0ed13bdbb904","Type":"ContainerDied","Data":"959e40d2d54fd0020f51f21d62b14981a6e1c4766b554fe9d2d7ccb9697db7d2"} Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.631960 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="959e40d2d54fd0020f51f21d62b14981a6e1c4766b554fe9d2d7ccb9697db7d2" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.639652 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.713583 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.713663 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-confd\") pod \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.713706 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-plugins\") pod \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.713743 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-pod-info\") pod \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.713806 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-tls\") pod \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.713857 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-config-data\") pod \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.713887 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-plugins-conf\") pod \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.713940 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-erlang-cookie\") pod \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.713998 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-erlang-cookie-secret\") pod \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.714053 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6dwtk\" (UniqueName: \"kubernetes.io/projected/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-kube-api-access-6dwtk\") pod \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.714131 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-server-conf\") pod \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\" (UID: \"8e2e73c0-db1d-45e0-b056-0ed13bdbb904\") " Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.723403 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "8e2e73c0-db1d-45e0-b056-0ed13bdbb904" (UID: "8e2e73c0-db1d-45e0-b056-0ed13bdbb904"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.726833 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "8e2e73c0-db1d-45e0-b056-0ed13bdbb904" (UID: "8e2e73c0-db1d-45e0-b056-0ed13bdbb904"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.728968 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "8e2e73c0-db1d-45e0-b056-0ed13bdbb904" (UID: "8e2e73c0-db1d-45e0-b056-0ed13bdbb904"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.729318 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "8e2e73c0-db1d-45e0-b056-0ed13bdbb904" (UID: "8e2e73c0-db1d-45e0-b056-0ed13bdbb904"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.737338 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "8e2e73c0-db1d-45e0-b056-0ed13bdbb904" (UID: "8e2e73c0-db1d-45e0-b056-0ed13bdbb904"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.746830 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "8e2e73c0-db1d-45e0-b056-0ed13bdbb904" (UID: "8e2e73c0-db1d-45e0-b056-0ed13bdbb904"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.750940 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-kube-api-access-6dwtk" (OuterVolumeSpecName: "kube-api-access-6dwtk") pod "8e2e73c0-db1d-45e0-b056-0ed13bdbb904" (UID: "8e2e73c0-db1d-45e0-b056-0ed13bdbb904"). InnerVolumeSpecName "kube-api-access-6dwtk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.769098 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-pod-info" (OuterVolumeSpecName: "pod-info") pod "8e2e73c0-db1d-45e0-b056-0ed13bdbb904" (UID: "8e2e73c0-db1d-45e0-b056-0ed13bdbb904"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.773847 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-config-data" (OuterVolumeSpecName: "config-data") pod "8e2e73c0-db1d-45e0-b056-0ed13bdbb904" (UID: "8e2e73c0-db1d-45e0-b056-0ed13bdbb904"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.816851 5039 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.816888 5039 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.816901 5039 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-pod-info\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.816911 5039 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.816921 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.816930 5039 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.816941 5039 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.816953 5039 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.816962 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6dwtk\" (UniqueName: \"kubernetes.io/projected/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-kube-api-access-6dwtk\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.852735 5039 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.856794 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-server-conf" (OuterVolumeSpecName: "server-conf") pod "8e2e73c0-db1d-45e0-b056-0ed13bdbb904" (UID: "8e2e73c0-db1d-45e0-b056-0ed13bdbb904"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.899281 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "8e2e73c0-db1d-45e0-b056-0ed13bdbb904" (UID: "8e2e73c0-db1d-45e0-b056-0ed13bdbb904"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.918203 5039 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-server-conf\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.918246 5039 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:35 crc kubenswrapper[5039]: I1124 13:44:35.918257 5039 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8e2e73c0-db1d-45e0-b056-0ed13bdbb904-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.641728 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.670964 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.685252 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.699211 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 24 13:44:36 crc kubenswrapper[5039]: E1124 13:44:36.699664 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e2e73c0-db1d-45e0-b056-0ed13bdbb904" containerName="rabbitmq" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.699682 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e2e73c0-db1d-45e0-b056-0ed13bdbb904" containerName="rabbitmq" Nov 24 13:44:36 crc kubenswrapper[5039]: E1124 13:44:36.699701 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e2e73c0-db1d-45e0-b056-0ed13bdbb904" containerName="setup-container" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.699709 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e2e73c0-db1d-45e0-b056-0ed13bdbb904" containerName="setup-container" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.700047 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e2e73c0-db1d-45e0-b056-0ed13bdbb904" containerName="rabbitmq" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.701307 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.708277 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.708350 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.708612 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.708690 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.708861 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-qxwdg" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.709023 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.710681 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.713828 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.833390 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c2b248b0-d5b6-4800-9f0a-915f03d73696-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.833446 5039 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c2b248b0-d5b6-4800-9f0a-915f03d73696-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.833474 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c2b248b0-d5b6-4800-9f0a-915f03d73696-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.833522 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c2b248b0-d5b6-4800-9f0a-915f03d73696-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.833609 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c2b248b0-d5b6-4800-9f0a-915f03d73696-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.833675 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c2b248b0-d5b6-4800-9f0a-915f03d73696-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.833754 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.833791 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c2b248b0-d5b6-4800-9f0a-915f03d73696-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.834326 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c2b248b0-d5b6-4800-9f0a-915f03d73696-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.834486 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhl97\" (UniqueName: \"kubernetes.io/projected/c2b248b0-d5b6-4800-9f0a-915f03d73696-kube-api-access-nhl97\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: 
I1124 13:44:36.834547 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c2b248b0-d5b6-4800-9f0a-915f03d73696-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.936950 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c2b248b0-d5b6-4800-9f0a-915f03d73696-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.937001 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c2b248b0-d5b6-4800-9f0a-915f03d73696-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.937330 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c2b248b0-d5b6-4800-9f0a-915f03d73696-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.937422 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c2b248b0-d5b6-4800-9f0a-915f03d73696-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.937450 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c2b248b0-d5b6-4800-9f0a-915f03d73696-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.937703 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.937730 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c2b248b0-d5b6-4800-9f0a-915f03d73696-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.937808 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c2b248b0-d5b6-4800-9f0a-915f03d73696-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.937861 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhl97\" (UniqueName: 
\"kubernetes.io/projected/c2b248b0-d5b6-4800-9f0a-915f03d73696-kube-api-access-nhl97\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.937898 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c2b248b0-d5b6-4800-9f0a-915f03d73696-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.938230 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c2b248b0-d5b6-4800-9f0a-915f03d73696-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.938166 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c2b248b0-d5b6-4800-9f0a-915f03d73696-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.938784 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c2b248b0-d5b6-4800-9f0a-915f03d73696-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.938813 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.939298 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c2b248b0-d5b6-4800-9f0a-915f03d73696-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.943546 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c2b248b0-d5b6-4800-9f0a-915f03d73696-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.944748 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c2b248b0-d5b6-4800-9f0a-915f03d73696-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.947833 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c2b248b0-d5b6-4800-9f0a-915f03d73696-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.948864 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c2b248b0-d5b6-4800-9f0a-915f03d73696-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.948960 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c2b248b0-d5b6-4800-9f0a-915f03d73696-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.949831 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c2b248b0-d5b6-4800-9f0a-915f03d73696-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.962548 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhl97\" (UniqueName: \"kubernetes.io/projected/c2b248b0-d5b6-4800-9f0a-915f03d73696-kube-api-access-nhl97\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:36 crc kubenswrapper[5039]: I1124 13:44:36.973412 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c2b248b0-d5b6-4800-9f0a-915f03d73696\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:37 crc kubenswrapper[5039]: I1124 13:44:37.023869 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:44:38 crc kubenswrapper[5039]: I1124 13:44:38.340630 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e2e73c0-db1d-45e0-b056-0ed13bdbb904" path="/var/lib/kubelet/pods/8e2e73c0-db1d-45e0-b056-0ed13bdbb904/volumes" Nov 24 13:44:41 crc kubenswrapper[5039]: I1124 13:44:41.306299 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:44:41 crc kubenswrapper[5039]: E1124 13:44:41.306905 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:44:44 crc kubenswrapper[5039]: I1124 13:44:44.728852 5039 scope.go:117] "RemoveContainer" containerID="ea9ea3aa66fddb386be5ad5b6935a74401ac939b2d26eb0fc82ed0db374a0398" Nov 24 13:44:46 crc kubenswrapper[5039]: I1124 13:44:46.286303 5039 scope.go:117] "RemoveContainer" containerID="6e40255f7df394711b21c74b237ac11944fc65febdf994a8a02b4cffa191e21c" Nov 24 13:44:46 crc kubenswrapper[5039]: E1124 13:44:46.351797 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Nov 24 13:44:46 crc kubenswrapper[5039]: E1124 13:44:46.351852 5039 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Nov 24 13:44:46 crc kubenswrapper[5039]: E1124 13:44:46.351975 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d 
db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-d7qm9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-bgfrt_openstack(0be1e28f-a5d0-4685-b76f-5e074a81fe93): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 13:44:46 crc kubenswrapper[5039]: E1124 13:44:46.353196 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-bgfrt" podUID="0be1e28f-a5d0-4685-b76f-5e074a81fe93" Nov 24 13:44:46 crc kubenswrapper[5039]: E1124 13:44:46.760648 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-bgfrt" podUID="0be1e28f-a5d0-4685-b76f-5e074a81fe93" Nov 24 13:44:46 crc kubenswrapper[5039]: E1124 13:44:46.838325 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Nov 24 13:44:46 crc kubenswrapper[5039]: I1124 13:44:46.839242 5039 scope.go:117] "RemoveContainer" containerID="ea9ea3aa66fddb386be5ad5b6935a74401ac939b2d26eb0fc82ed0db374a0398" Nov 24 13:44:46 crc kubenswrapper[5039]: E1124 13:44:46.839710 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea9ea3aa66fddb386be5ad5b6935a74401ac939b2d26eb0fc82ed0db374a0398\": container with ID starting with 
ea9ea3aa66fddb386be5ad5b6935a74401ac939b2d26eb0fc82ed0db374a0398 not found: ID does not exist" containerID="ea9ea3aa66fddb386be5ad5b6935a74401ac939b2d26eb0fc82ed0db374a0398" Nov 24 13:44:46 crc kubenswrapper[5039]: I1124 13:44:46.839739 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea9ea3aa66fddb386be5ad5b6935a74401ac939b2d26eb0fc82ed0db374a0398"} err="failed to get container status \"ea9ea3aa66fddb386be5ad5b6935a74401ac939b2d26eb0fc82ed0db374a0398\": rpc error: code = NotFound desc = could not find container \"ea9ea3aa66fddb386be5ad5b6935a74401ac939b2d26eb0fc82ed0db374a0398\": container with ID starting with ea9ea3aa66fddb386be5ad5b6935a74401ac939b2d26eb0fc82ed0db374a0398 not found: ID does not exist" Nov 24 13:44:46 crc kubenswrapper[5039]: E1124 13:44:46.840654 5039 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Nov 24 13:44:46 crc kubenswrapper[5039]: E1124 13:44:46.841332 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n54ch7h669h675h645h7dh68ch87h684h568h68ch596h66ch658h8bh5ffh87h5cdhbh58fh55fh98h697h67fh545hc5hc7h554h648hch7hc8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bhg7n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
ceilometer-0_openstack(831e28f3-74a0-4b52-933c-1a3e7a7811f6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 13:44:47 crc kubenswrapper[5039]: I1124 13:44:47.327772 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-wdmkf"] Nov 24 13:44:47 crc kubenswrapper[5039]: I1124 13:44:47.427663 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 24 13:44:47 crc kubenswrapper[5039]: W1124 13:44:47.428443 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc2b248b0_d5b6_4800_9f0a_915f03d73696.slice/crio-8eaa167e750f19ec680da687ddc3eaeb7465a82cf9daab629967e798c0cefce7 WatchSource:0}: Error finding container 8eaa167e750f19ec680da687ddc3eaeb7465a82cf9daab629967e798c0cefce7: Status 404 returned error can't find the container with id 8eaa167e750f19ec680da687ddc3eaeb7465a82cf9daab629967e798c0cefce7 Nov 24 13:44:47 crc kubenswrapper[5039]: W1124 13:44:47.447144 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb820e90e_779c_4300_b0e0_affe5118e73f.slice/crio-642ff4a281000724748dac7e1bd40018c4309ac74c25a63575406a3f3cdc3874 WatchSource:0}: Error finding container 642ff4a281000724748dac7e1bd40018c4309ac74c25a63575406a3f3cdc3874: Status 404 returned error can't find the container with id 642ff4a281000724748dac7e1bd40018c4309ac74c25a63575406a3f3cdc3874 Nov 24 13:44:47 crc kubenswrapper[5039]: I1124 13:44:47.454700 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 13:44:47 crc kubenswrapper[5039]: I1124 13:44:47.770353 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-wdmkf" event={"ID":"3fd60732-1a85-4f00-88f8-5db8885ff122","Type":"ContainerStarted","Data":"ed3da603eaaf797f23b6b0dd76a329c69607dc69e356b95948106c53e0128589"} Nov 24 13:44:47 crc kubenswrapper[5039]: I1124 13:44:47.770401 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-wdmkf" event={"ID":"3fd60732-1a85-4f00-88f8-5db8885ff122","Type":"ContainerStarted","Data":"0fda0cc1ec8f970ac65ecd9de3cb931f62fd4de6c819bb8c5cb21d1b50748902"} Nov 24 13:44:47 crc kubenswrapper[5039]: I1124 13:44:47.771688 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c2b248b0-d5b6-4800-9f0a-915f03d73696","Type":"ContainerStarted","Data":"8eaa167e750f19ec680da687ddc3eaeb7465a82cf9daab629967e798c0cefce7"} Nov 24 13:44:47 crc kubenswrapper[5039]: I1124 13:44:47.772727 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b820e90e-779c-4300-b0e0-affe5118e73f","Type":"ContainerStarted","Data":"642ff4a281000724748dac7e1bd40018c4309ac74c25a63575406a3f3cdc3874"} Nov 24 13:44:47 crc kubenswrapper[5039]: I1124 13:44:47.773832 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"831e28f3-74a0-4b52-933c-1a3e7a7811f6","Type":"ContainerStarted","Data":"676ad49a9cf447ed89ee4328121b62dea57f7d5bac27fc3c01e258a256979041"} Nov 24 13:44:48 crc kubenswrapper[5039]: I1124 13:44:48.790441 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"831e28f3-74a0-4b52-933c-1a3e7a7811f6","Type":"ContainerStarted","Data":"d7ed1854953f5beccc1c80d3aa08f9cc44a999bd3d53f012a7882ae1b779de73"} Nov 24 13:44:48 crc kubenswrapper[5039]: I1124 13:44:48.792289 5039 generic.go:334] "Generic (PLEG): container finished" podID="3fd60732-1a85-4f00-88f8-5db8885ff122" containerID="ed3da603eaaf797f23b6b0dd76a329c69607dc69e356b95948106c53e0128589" exitCode=0 Nov 24 13:44:48 crc kubenswrapper[5039]: I1124 13:44:48.792346 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-wdmkf" event={"ID":"3fd60732-1a85-4f00-88f8-5db8885ff122","Type":"ContainerDied","Data":"ed3da603eaaf797f23b6b0dd76a329c69607dc69e356b95948106c53e0128589"} Nov 24 13:44:49 crc kubenswrapper[5039]: I1124 13:44:49.810430 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b820e90e-779c-4300-b0e0-affe5118e73f","Type":"ContainerStarted","Data":"666cb50e3cd5bf82688f2dddbb37d8be99f6462371f801198859bdcb23a8c40d"} Nov 24 13:44:49 crc kubenswrapper[5039]: I1124 13:44:49.818496 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-wdmkf" event={"ID":"3fd60732-1a85-4f00-88f8-5db8885ff122","Type":"ContainerStarted","Data":"46af6a36325ad7c160e9e794036dc49e142bf2ca167cdbfaf5999e10fd086e62"} Nov 24 13:44:49 crc kubenswrapper[5039]: I1124 13:44:49.819561 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:49 crc kubenswrapper[5039]: I1124 13:44:49.824687 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c2b248b0-d5b6-4800-9f0a-915f03d73696","Type":"ContainerStarted","Data":"6d0424f5b11772df023db286e37acf11ffeb7121aac6159494a360a6f71cf940"} Nov 24 13:44:49 crc kubenswrapper[5039]: I1124 13:44:49.894058 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-68df85789f-wdmkf" podStartSLOduration=17.894040966 podStartE2EDuration="17.894040966s" podCreationTimestamp="2025-11-24 13:44:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:44:49.881973051 +0000 UTC m=+1602.321097571" watchObservedRunningTime="2025-11-24 13:44:49.894040966 +0000 UTC m=+1602.333165466" Nov 24 13:44:49 crc kubenswrapper[5039]: E1124 13:44:49.996770 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" Nov 24 13:44:50 crc kubenswrapper[5039]: I1124 13:44:50.836815 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"831e28f3-74a0-4b52-933c-1a3e7a7811f6","Type":"ContainerStarted","Data":"ef9919777fea4d0846a31936b070b22baec3d41ef9ecdbf05f9bf5dec75dfb41"} Nov 24 13:44:50 crc kubenswrapper[5039]: I1124 13:44:50.837041 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 13:44:50 crc kubenswrapper[5039]: E1124 13:44:50.838644 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" Nov 24 13:44:51 crc kubenswrapper[5039]: E1124 13:44:51.850275 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" Nov 24 13:44:53 crc kubenswrapper[5039]: I1124 13:44:53.307470 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:44:53 crc kubenswrapper[5039]: E1124 13:44:53.308110 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:44:57 crc kubenswrapper[5039]: I1124 13:44:57.858694 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:44:57 crc kubenswrapper[5039]: I1124 13:44:57.937752 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-wpzfw"] Nov 24 13:44:57 crc kubenswrapper[5039]: I1124 13:44:57.938153 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" podUID="66f89de8-d3eb-4aa2-a537-e4a768c732dd" containerName="dnsmasq-dns" containerID="cri-o://a2f4ce3a7ab8c05c67eb8ed15d8d542de4d214b41ddc3ae0eda0977119faba80" gracePeriod=10 Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.185646 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-768b698657-svwhq"] Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.187793 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.205386 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-768b698657-svwhq"] Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.337842 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bsw2z\" (UniqueName: \"kubernetes.io/projected/34d32473-00c1-407b-b009-0d43c17038f9-kube-api-access-bsw2z\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.337894 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-ovsdbserver-nb\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.338079 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-ovsdbserver-sb\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.338229 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-config\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.338456 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-dns-swift-storage-0\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.338555 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-dns-svc\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.338644 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-openstack-edpm-ipam\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.440638 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-dns-swift-storage-0\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 
13:44:58.440717 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-dns-svc\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.440783 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-openstack-edpm-ipam\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.440852 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bsw2z\" (UniqueName: \"kubernetes.io/projected/34d32473-00c1-407b-b009-0d43c17038f9-kube-api-access-bsw2z\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.440872 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-ovsdbserver-nb\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.440905 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-ovsdbserver-sb\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.440944 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-config\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.441902 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-config\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.443159 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-dns-swift-storage-0\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.444311 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-dns-svc\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.445593 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-openstack-edpm-ipam\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.446150 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-ovsdbserver-sb\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.446476 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-ovsdbserver-nb\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.470071 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bsw2z\" (UniqueName: \"kubernetes.io/projected/34d32473-00c1-407b-b009-0d43c17038f9-kube-api-access-bsw2z\") pod \"dnsmasq-dns-768b698657-svwhq\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.511009 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.705649 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.860794 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-dns-svc\") pod \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.860866 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-dns-swift-storage-0\") pod \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.860914 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-ovsdbserver-nb\") pod \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.861043 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s44ds\" (UniqueName: \"kubernetes.io/projected/66f89de8-d3eb-4aa2-a537-e4a768c732dd-kube-api-access-s44ds\") pod \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.861111 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-ovsdbserver-sb\") pod \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\" (UID: 
\"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.861202 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-config\") pod \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\" (UID: \"66f89de8-d3eb-4aa2-a537-e4a768c732dd\") " Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.873012 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66f89de8-d3eb-4aa2-a537-e4a768c732dd-kube-api-access-s44ds" (OuterVolumeSpecName: "kube-api-access-s44ds") pod "66f89de8-d3eb-4aa2-a537-e4a768c732dd" (UID: "66f89de8-d3eb-4aa2-a537-e4a768c732dd"). InnerVolumeSpecName "kube-api-access-s44ds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.944787 5039 generic.go:334] "Generic (PLEG): container finished" podID="66f89de8-d3eb-4aa2-a537-e4a768c732dd" containerID="a2f4ce3a7ab8c05c67eb8ed15d8d542de4d214b41ddc3ae0eda0977119faba80" exitCode=0 Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.945072 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" event={"ID":"66f89de8-d3eb-4aa2-a537-e4a768c732dd","Type":"ContainerDied","Data":"a2f4ce3a7ab8c05c67eb8ed15d8d542de4d214b41ddc3ae0eda0977119faba80"} Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.945153 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" event={"ID":"66f89de8-d3eb-4aa2-a537-e4a768c732dd","Type":"ContainerDied","Data":"6305ad22819f32cff50ebd356bd036d794dc062007a4b1a16e19a2e1ec1d1007"} Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.945210 5039 scope.go:117] "RemoveContainer" containerID="a2f4ce3a7ab8c05c67eb8ed15d8d542de4d214b41ddc3ae0eda0977119faba80" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.945102 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-wpzfw" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.954396 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "66f89de8-d3eb-4aa2-a537-e4a768c732dd" (UID: "66f89de8-d3eb-4aa2-a537-e4a768c732dd"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.956106 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "66f89de8-d3eb-4aa2-a537-e4a768c732dd" (UID: "66f89de8-d3eb-4aa2-a537-e4a768c732dd"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.964374 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s44ds\" (UniqueName: \"kubernetes.io/projected/66f89de8-d3eb-4aa2-a537-e4a768c732dd-kube-api-access-s44ds\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.964400 5039 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.964409 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.973374 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "66f89de8-d3eb-4aa2-a537-e4a768c732dd" (UID: "66f89de8-d3eb-4aa2-a537-e4a768c732dd"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.976308 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "66f89de8-d3eb-4aa2-a537-e4a768c732dd" (UID: "66f89de8-d3eb-4aa2-a537-e4a768c732dd"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:44:58 crc kubenswrapper[5039]: I1124 13:44:58.998964 5039 scope.go:117] "RemoveContainer" containerID="a2cd4de79a44bd132f5eaeed5f34575cdeb6a114ea2f3883903a4682d0ff8820" Nov 24 13:44:59 crc kubenswrapper[5039]: I1124 13:44:59.025125 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-config" (OuterVolumeSpecName: "config") pod "66f89de8-d3eb-4aa2-a537-e4a768c732dd" (UID: "66f89de8-d3eb-4aa2-a537-e4a768c732dd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:44:59 crc kubenswrapper[5039]: I1124 13:44:59.039992 5039 scope.go:117] "RemoveContainer" containerID="a2f4ce3a7ab8c05c67eb8ed15d8d542de4d214b41ddc3ae0eda0977119faba80" Nov 24 13:44:59 crc kubenswrapper[5039]: E1124 13:44:59.040577 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2f4ce3a7ab8c05c67eb8ed15d8d542de4d214b41ddc3ae0eda0977119faba80\": container with ID starting with a2f4ce3a7ab8c05c67eb8ed15d8d542de4d214b41ddc3ae0eda0977119faba80 not found: ID does not exist" containerID="a2f4ce3a7ab8c05c67eb8ed15d8d542de4d214b41ddc3ae0eda0977119faba80" Nov 24 13:44:59 crc kubenswrapper[5039]: I1124 13:44:59.040673 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2f4ce3a7ab8c05c67eb8ed15d8d542de4d214b41ddc3ae0eda0977119faba80"} err="failed to get container status \"a2f4ce3a7ab8c05c67eb8ed15d8d542de4d214b41ddc3ae0eda0977119faba80\": rpc error: code = NotFound desc = could not find container \"a2f4ce3a7ab8c05c67eb8ed15d8d542de4d214b41ddc3ae0eda0977119faba80\": container with ID starting with a2f4ce3a7ab8c05c67eb8ed15d8d542de4d214b41ddc3ae0eda0977119faba80 not found: ID does not exist" Nov 24 13:44:59 crc kubenswrapper[5039]: I1124 13:44:59.040765 5039 scope.go:117] "RemoveContainer" containerID="a2cd4de79a44bd132f5eaeed5f34575cdeb6a114ea2f3883903a4682d0ff8820" Nov 24 13:44:59 crc kubenswrapper[5039]: E1124 13:44:59.045207 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2cd4de79a44bd132f5eaeed5f34575cdeb6a114ea2f3883903a4682d0ff8820\": container with ID starting with a2cd4de79a44bd132f5eaeed5f34575cdeb6a114ea2f3883903a4682d0ff8820 not found: ID does not exist" containerID="a2cd4de79a44bd132f5eaeed5f34575cdeb6a114ea2f3883903a4682d0ff8820" Nov 24 13:44:59 crc kubenswrapper[5039]: I1124 13:44:59.045257 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2cd4de79a44bd132f5eaeed5f34575cdeb6a114ea2f3883903a4682d0ff8820"} err="failed to get container status \"a2cd4de79a44bd132f5eaeed5f34575cdeb6a114ea2f3883903a4682d0ff8820\": rpc error: code = NotFound desc = could not find container \"a2cd4de79a44bd132f5eaeed5f34575cdeb6a114ea2f3883903a4682d0ff8820\": container with ID starting with a2cd4de79a44bd132f5eaeed5f34575cdeb6a114ea2f3883903a4682d0ff8820 not found: ID does not exist" Nov 24 13:44:59 crc kubenswrapper[5039]: I1124 13:44:59.068864 5039 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:59 crc kubenswrapper[5039]: I1124 13:44:59.068892 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:59 crc kubenswrapper[5039]: I1124 13:44:59.068903 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66f89de8-d3eb-4aa2-a537-e4a768c732dd-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:44:59 crc kubenswrapper[5039]: I1124 13:44:59.283443 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-wpzfw"] Nov 24 13:44:59 crc kubenswrapper[5039]: I1124 13:44:59.294702 
5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-wpzfw"] Nov 24 13:44:59 crc kubenswrapper[5039]: I1124 13:44:59.334030 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-768b698657-svwhq"] Nov 24 13:44:59 crc kubenswrapper[5039]: W1124 13:44:59.340772 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod34d32473_00c1_407b_b009_0d43c17038f9.slice/crio-2acd68dfad98b66f8db726637344fa9d41011ffc9f44a393b4023e0e7842374d WatchSource:0}: Error finding container 2acd68dfad98b66f8db726637344fa9d41011ffc9f44a393b4023e0e7842374d: Status 404 returned error can't find the container with id 2acd68dfad98b66f8db726637344fa9d41011ffc9f44a393b4023e0e7842374d Nov 24 13:44:59 crc kubenswrapper[5039]: I1124 13:44:59.956110 5039 generic.go:334] "Generic (PLEG): container finished" podID="34d32473-00c1-407b-b009-0d43c17038f9" containerID="543e077dd91f790cca1736e95c1900b57a91f00a1272f00b039764beaca6589f" exitCode=0 Nov 24 13:44:59 crc kubenswrapper[5039]: I1124 13:44:59.956209 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-768b698657-svwhq" event={"ID":"34d32473-00c1-407b-b009-0d43c17038f9","Type":"ContainerDied","Data":"543e077dd91f790cca1736e95c1900b57a91f00a1272f00b039764beaca6589f"} Nov 24 13:44:59 crc kubenswrapper[5039]: I1124 13:44:59.956484 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-768b698657-svwhq" event={"ID":"34d32473-00c1-407b-b009-0d43c17038f9","Type":"ContainerStarted","Data":"2acd68dfad98b66f8db726637344fa9d41011ffc9f44a393b4023e0e7842374d"} Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.149914 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q"] Nov 24 13:45:00 crc kubenswrapper[5039]: E1124 13:45:00.150526 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66f89de8-d3eb-4aa2-a537-e4a768c732dd" containerName="dnsmasq-dns" Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.150551 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="66f89de8-d3eb-4aa2-a537-e4a768c732dd" containerName="dnsmasq-dns" Nov 24 13:45:00 crc kubenswrapper[5039]: E1124 13:45:00.150613 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66f89de8-d3eb-4aa2-a537-e4a768c732dd" containerName="init" Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.150623 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="66f89de8-d3eb-4aa2-a537-e4a768c732dd" containerName="init" Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.150888 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="66f89de8-d3eb-4aa2-a537-e4a768c732dd" containerName="dnsmasq-dns" Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.151867 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q" Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.153959 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.154024 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.177556 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q"] Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.296574 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mb869\" (UniqueName: \"kubernetes.io/projected/2298360a-7895-4303-a3f8-a32cfbe731c9-kube-api-access-mb869\") pod \"collect-profiles-29399865-rzs5q\" (UID: \"2298360a-7895-4303-a3f8-a32cfbe731c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q" Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.296742 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2298360a-7895-4303-a3f8-a32cfbe731c9-secret-volume\") pod \"collect-profiles-29399865-rzs5q\" (UID: \"2298360a-7895-4303-a3f8-a32cfbe731c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q" Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.296771 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2298360a-7895-4303-a3f8-a32cfbe731c9-config-volume\") pod \"collect-profiles-29399865-rzs5q\" (UID: \"2298360a-7895-4303-a3f8-a32cfbe731c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q" Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.333287 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66f89de8-d3eb-4aa2-a537-e4a768c732dd" path="/var/lib/kubelet/pods/66f89de8-d3eb-4aa2-a537-e4a768c732dd/volumes" Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.399477 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2298360a-7895-4303-a3f8-a32cfbe731c9-secret-volume\") pod \"collect-profiles-29399865-rzs5q\" (UID: \"2298360a-7895-4303-a3f8-a32cfbe731c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q" Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.399546 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2298360a-7895-4303-a3f8-a32cfbe731c9-config-volume\") pod \"collect-profiles-29399865-rzs5q\" (UID: \"2298360a-7895-4303-a3f8-a32cfbe731c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q" Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.399667 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mb869\" (UniqueName: \"kubernetes.io/projected/2298360a-7895-4303-a3f8-a32cfbe731c9-kube-api-access-mb869\") pod \"collect-profiles-29399865-rzs5q\" (UID: \"2298360a-7895-4303-a3f8-a32cfbe731c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q" Nov 
24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.417745 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2298360a-7895-4303-a3f8-a32cfbe731c9-config-volume\") pod \"collect-profiles-29399865-rzs5q\" (UID: \"2298360a-7895-4303-a3f8-a32cfbe731c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q" Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.423738 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mb869\" (UniqueName: \"kubernetes.io/projected/2298360a-7895-4303-a3f8-a32cfbe731c9-kube-api-access-mb869\") pod \"collect-profiles-29399865-rzs5q\" (UID: \"2298360a-7895-4303-a3f8-a32cfbe731c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q" Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.426851 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2298360a-7895-4303-a3f8-a32cfbe731c9-secret-volume\") pod \"collect-profiles-29399865-rzs5q\" (UID: \"2298360a-7895-4303-a3f8-a32cfbe731c9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q" Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.478495 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q" Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.970355 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-bgfrt" event={"ID":"0be1e28f-a5d0-4685-b76f-5e074a81fe93","Type":"ContainerStarted","Data":"ae678a50eb009e9a11df09f527744b2e15e03d51b5d029f49a5f53f97755fe1a"} Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.973170 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-768b698657-svwhq" event={"ID":"34d32473-00c1-407b-b009-0d43c17038f9","Type":"ContainerStarted","Data":"dba3100ce911db3502d42c230d3b265f5953b08786d65209227b2e44659334ad"} Nov 24 13:45:00 crc kubenswrapper[5039]: W1124 13:45:00.996689 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2298360a_7895_4303_a3f8_a32cfbe731c9.slice/crio-1d7128316530491892830694074ee365698697c8fe5ac2745a8fbc331493a980 WatchSource:0}: Error finding container 1d7128316530491892830694074ee365698697c8fe5ac2745a8fbc331493a980: Status 404 returned error can't find the container with id 1d7128316530491892830694074ee365698697c8fe5ac2745a8fbc331493a980 Nov 24 13:45:00 crc kubenswrapper[5039]: I1124 13:45:00.998111 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q"] Nov 24 13:45:01 crc kubenswrapper[5039]: I1124 13:45:01.985153 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q" event={"ID":"2298360a-7895-4303-a3f8-a32cfbe731c9","Type":"ContainerStarted","Data":"15a0e18f432a7fbbb3d42a409ba29d73bfd79f8aeda2d3cc5fd265c1dd70a2f8"} Nov 24 13:45:01 crc kubenswrapper[5039]: I1124 13:45:01.985539 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:45:01 crc kubenswrapper[5039]: I1124 13:45:01.985559 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q" 
event={"ID":"2298360a-7895-4303-a3f8-a32cfbe731c9","Type":"ContainerStarted","Data":"1d7128316530491892830694074ee365698697c8fe5ac2745a8fbc331493a980"} Nov 24 13:45:02 crc kubenswrapper[5039]: I1124 13:45:02.023342 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-768b698657-svwhq" podStartSLOduration=4.023320332 podStartE2EDuration="4.023320332s" podCreationTimestamp="2025-11-24 13:44:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:45:02.013075091 +0000 UTC m=+1614.452199601" watchObservedRunningTime="2025-11-24 13:45:02.023320332 +0000 UTC m=+1614.462444852" Nov 24 13:45:02 crc kubenswrapper[5039]: I1124 13:45:02.045531 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-bgfrt" podStartSLOduration=2.800923908 podStartE2EDuration="48.045492615s" podCreationTimestamp="2025-11-24 13:44:14 +0000 UTC" firstStartedPulling="2025-11-24 13:44:15.05095226 +0000 UTC m=+1567.490076760" lastFinishedPulling="2025-11-24 13:45:00.295520957 +0000 UTC m=+1612.734645467" observedRunningTime="2025-11-24 13:45:02.040200626 +0000 UTC m=+1614.479325126" watchObservedRunningTime="2025-11-24 13:45:02.045492615 +0000 UTC m=+1614.484617115" Nov 24 13:45:02 crc kubenswrapper[5039]: I1124 13:45:02.055590 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q" podStartSLOduration=2.055571462 podStartE2EDuration="2.055571462s" podCreationTimestamp="2025-11-24 13:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:45:02.052451486 +0000 UTC m=+1614.491575986" watchObservedRunningTime="2025-11-24 13:45:02.055571462 +0000 UTC m=+1614.494695962" Nov 24 13:45:02 crc kubenswrapper[5039]: I1124 13:45:02.995585 5039 generic.go:334] "Generic (PLEG): container finished" podID="2298360a-7895-4303-a3f8-a32cfbe731c9" containerID="15a0e18f432a7fbbb3d42a409ba29d73bfd79f8aeda2d3cc5fd265c1dd70a2f8" exitCode=0 Nov 24 13:45:02 crc kubenswrapper[5039]: I1124 13:45:02.996024 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q" event={"ID":"2298360a-7895-4303-a3f8-a32cfbe731c9","Type":"ContainerDied","Data":"15a0e18f432a7fbbb3d42a409ba29d73bfd79f8aeda2d3cc5fd265c1dd70a2f8"} Nov 24 13:45:04 crc kubenswrapper[5039]: I1124 13:45:04.008490 5039 generic.go:334] "Generic (PLEG): container finished" podID="0be1e28f-a5d0-4685-b76f-5e074a81fe93" containerID="ae678a50eb009e9a11df09f527744b2e15e03d51b5d029f49a5f53f97755fe1a" exitCode=0 Nov 24 13:45:04 crc kubenswrapper[5039]: I1124 13:45:04.008539 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-bgfrt" event={"ID":"0be1e28f-a5d0-4685-b76f-5e074a81fe93","Type":"ContainerDied","Data":"ae678a50eb009e9a11df09f527744b2e15e03d51b5d029f49a5f53f97755fe1a"} Nov 24 13:45:04 crc kubenswrapper[5039]: I1124 13:45:04.332891 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 24 13:45:04 crc kubenswrapper[5039]: I1124 13:45:04.430413 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q" Nov 24 13:45:04 crc kubenswrapper[5039]: I1124 13:45:04.588863 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2298360a-7895-4303-a3f8-a32cfbe731c9-secret-volume\") pod \"2298360a-7895-4303-a3f8-a32cfbe731c9\" (UID: \"2298360a-7895-4303-a3f8-a32cfbe731c9\") " Nov 24 13:45:04 crc kubenswrapper[5039]: I1124 13:45:04.588990 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2298360a-7895-4303-a3f8-a32cfbe731c9-config-volume\") pod \"2298360a-7895-4303-a3f8-a32cfbe731c9\" (UID: \"2298360a-7895-4303-a3f8-a32cfbe731c9\") " Nov 24 13:45:04 crc kubenswrapper[5039]: I1124 13:45:04.589108 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mb869\" (UniqueName: \"kubernetes.io/projected/2298360a-7895-4303-a3f8-a32cfbe731c9-kube-api-access-mb869\") pod \"2298360a-7895-4303-a3f8-a32cfbe731c9\" (UID: \"2298360a-7895-4303-a3f8-a32cfbe731c9\") " Nov 24 13:45:04 crc kubenswrapper[5039]: I1124 13:45:04.589758 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2298360a-7895-4303-a3f8-a32cfbe731c9-config-volume" (OuterVolumeSpecName: "config-volume") pod "2298360a-7895-4303-a3f8-a32cfbe731c9" (UID: "2298360a-7895-4303-a3f8-a32cfbe731c9"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:45:04 crc kubenswrapper[5039]: I1124 13:45:04.590334 5039 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2298360a-7895-4303-a3f8-a32cfbe731c9-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:04 crc kubenswrapper[5039]: I1124 13:45:04.593846 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2298360a-7895-4303-a3f8-a32cfbe731c9-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2298360a-7895-4303-a3f8-a32cfbe731c9" (UID: "2298360a-7895-4303-a3f8-a32cfbe731c9"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:04 crc kubenswrapper[5039]: I1124 13:45:04.594151 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2298360a-7895-4303-a3f8-a32cfbe731c9-kube-api-access-mb869" (OuterVolumeSpecName: "kube-api-access-mb869") pod "2298360a-7895-4303-a3f8-a32cfbe731c9" (UID: "2298360a-7895-4303-a3f8-a32cfbe731c9"). InnerVolumeSpecName "kube-api-access-mb869". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:45:04 crc kubenswrapper[5039]: I1124 13:45:04.692765 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mb869\" (UniqueName: \"kubernetes.io/projected/2298360a-7895-4303-a3f8-a32cfbe731c9-kube-api-access-mb869\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:04 crc kubenswrapper[5039]: I1124 13:45:04.692818 5039 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2298360a-7895-4303-a3f8-a32cfbe731c9-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:05 crc kubenswrapper[5039]: I1124 13:45:05.020868 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q" event={"ID":"2298360a-7895-4303-a3f8-a32cfbe731c9","Type":"ContainerDied","Data":"1d7128316530491892830694074ee365698697c8fe5ac2745a8fbc331493a980"} Nov 24 13:45:05 crc kubenswrapper[5039]: I1124 13:45:05.021468 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1d7128316530491892830694074ee365698697c8fe5ac2745a8fbc331493a980" Nov 24 13:45:05 crc kubenswrapper[5039]: I1124 13:45:05.021150 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q" Nov 24 13:45:05 crc kubenswrapper[5039]: I1124 13:45:05.034733 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"831e28f3-74a0-4b52-933c-1a3e7a7811f6","Type":"ContainerStarted","Data":"d7d5b28c0a12345292200d4ec7b441408e55ebf9f0f12ab3fe09723cee8869da"} Nov 24 13:45:05 crc kubenswrapper[5039]: I1124 13:45:05.061380 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.235589628 podStartE2EDuration="46.061355881s" podCreationTimestamp="2025-11-24 13:44:19 +0000 UTC" firstStartedPulling="2025-11-24 13:44:20.657654127 +0000 UTC m=+1573.096778627" lastFinishedPulling="2025-11-24 13:45:04.48342038 +0000 UTC m=+1616.922544880" observedRunningTime="2025-11-24 13:45:05.05560437 +0000 UTC m=+1617.494728870" watchObservedRunningTime="2025-11-24 13:45:05.061355881 +0000 UTC m=+1617.500480381" Nov 24 13:45:05 crc kubenswrapper[5039]: I1124 13:45:05.322624 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:45:05 crc kubenswrapper[5039]: E1124 13:45:05.322929 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:45:05 crc kubenswrapper[5039]: I1124 13:45:05.502614 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-bgfrt" Nov 24 13:45:05 crc kubenswrapper[5039]: I1124 13:45:05.631905 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d7qm9\" (UniqueName: \"kubernetes.io/projected/0be1e28f-a5d0-4685-b76f-5e074a81fe93-kube-api-access-d7qm9\") pod \"0be1e28f-a5d0-4685-b76f-5e074a81fe93\" (UID: \"0be1e28f-a5d0-4685-b76f-5e074a81fe93\") " Nov 24 13:45:05 crc kubenswrapper[5039]: I1124 13:45:05.632243 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0be1e28f-a5d0-4685-b76f-5e074a81fe93-combined-ca-bundle\") pod \"0be1e28f-a5d0-4685-b76f-5e074a81fe93\" (UID: \"0be1e28f-a5d0-4685-b76f-5e074a81fe93\") " Nov 24 13:45:05 crc kubenswrapper[5039]: I1124 13:45:05.632274 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0be1e28f-a5d0-4685-b76f-5e074a81fe93-config-data\") pod \"0be1e28f-a5d0-4685-b76f-5e074a81fe93\" (UID: \"0be1e28f-a5d0-4685-b76f-5e074a81fe93\") " Nov 24 13:45:05 crc kubenswrapper[5039]: I1124 13:45:05.638577 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0be1e28f-a5d0-4685-b76f-5e074a81fe93-kube-api-access-d7qm9" (OuterVolumeSpecName: "kube-api-access-d7qm9") pod "0be1e28f-a5d0-4685-b76f-5e074a81fe93" (UID: "0be1e28f-a5d0-4685-b76f-5e074a81fe93"). InnerVolumeSpecName "kube-api-access-d7qm9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:45:05 crc kubenswrapper[5039]: I1124 13:45:05.662102 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0be1e28f-a5d0-4685-b76f-5e074a81fe93-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0be1e28f-a5d0-4685-b76f-5e074a81fe93" (UID: "0be1e28f-a5d0-4685-b76f-5e074a81fe93"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:05 crc kubenswrapper[5039]: I1124 13:45:05.719443 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0be1e28f-a5d0-4685-b76f-5e074a81fe93-config-data" (OuterVolumeSpecName: "config-data") pod "0be1e28f-a5d0-4685-b76f-5e074a81fe93" (UID: "0be1e28f-a5d0-4685-b76f-5e074a81fe93"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:05 crc kubenswrapper[5039]: I1124 13:45:05.734379 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d7qm9\" (UniqueName: \"kubernetes.io/projected/0be1e28f-a5d0-4685-b76f-5e074a81fe93-kube-api-access-d7qm9\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:05 crc kubenswrapper[5039]: I1124 13:45:05.734425 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0be1e28f-a5d0-4685-b76f-5e074a81fe93-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:05 crc kubenswrapper[5039]: I1124 13:45:05.734437 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0be1e28f-a5d0-4685-b76f-5e074a81fe93-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:06 crc kubenswrapper[5039]: I1124 13:45:06.046069 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-bgfrt" event={"ID":"0be1e28f-a5d0-4685-b76f-5e074a81fe93","Type":"ContainerDied","Data":"abd437b055840bb712aabd8d4db791999c9c6070e8daad1582d4bcd2318c8082"} Nov 24 13:45:06 crc kubenswrapper[5039]: I1124 13:45:06.046127 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="abd437b055840bb712aabd8d4db791999c9c6070e8daad1582d4bcd2318c8082" Nov 24 13:45:06 crc kubenswrapper[5039]: I1124 13:45:06.046285 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-bgfrt" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.576741 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-b4696fd89-fd5qp"] Nov 24 13:45:07 crc kubenswrapper[5039]: E1124 13:45:07.577472 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2298360a-7895-4303-a3f8-a32cfbe731c9" containerName="collect-profiles" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.577486 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="2298360a-7895-4303-a3f8-a32cfbe731c9" containerName="collect-profiles" Nov 24 13:45:07 crc kubenswrapper[5039]: E1124 13:45:07.577526 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0be1e28f-a5d0-4685-b76f-5e074a81fe93" containerName="heat-db-sync" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.577534 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0be1e28f-a5d0-4685-b76f-5e074a81fe93" containerName="heat-db-sync" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.577767 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="2298360a-7895-4303-a3f8-a32cfbe731c9" containerName="collect-profiles" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.577793 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0be1e28f-a5d0-4685-b76f-5e074a81fe93" containerName="heat-db-sync" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.578534 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-b4696fd89-fd5qp" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.595591 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-b4696fd89-fd5qp"] Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.618045 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-7b9d6d4567-h9q74"] Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.619461 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.632906 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-7b9d6d4567-h9q74"] Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.646557 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-6b64fd586c-rsg7v"] Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.648296 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.676588 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71066830-9639-4b66-b1c2-cbbc8eb2a821-combined-ca-bundle\") pod \"heat-engine-b4696fd89-fd5qp\" (UID: \"71066830-9639-4b66-b1c2-cbbc8eb2a821\") " pod="openstack/heat-engine-b4696fd89-fd5qp" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.676730 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/71066830-9639-4b66-b1c2-cbbc8eb2a821-config-data-custom\") pod \"heat-engine-b4696fd89-fd5qp\" (UID: \"71066830-9639-4b66-b1c2-cbbc8eb2a821\") " pod="openstack/heat-engine-b4696fd89-fd5qp" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.676770 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfwt2\" (UniqueName: \"kubernetes.io/projected/71066830-9639-4b66-b1c2-cbbc8eb2a821-kube-api-access-dfwt2\") pod \"heat-engine-b4696fd89-fd5qp\" (UID: \"71066830-9639-4b66-b1c2-cbbc8eb2a821\") " pod="openstack/heat-engine-b4696fd89-fd5qp" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.676963 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71066830-9639-4b66-b1c2-cbbc8eb2a821-config-data\") pod \"heat-engine-b4696fd89-fd5qp\" (UID: \"71066830-9639-4b66-b1c2-cbbc8eb2a821\") " pod="openstack/heat-engine-b4696fd89-fd5qp" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.714127 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-6b64fd586c-rsg7v"] Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.778804 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71066830-9639-4b66-b1c2-cbbc8eb2a821-combined-ca-bundle\") pod \"heat-engine-b4696fd89-fd5qp\" (UID: \"71066830-9639-4b66-b1c2-cbbc8eb2a821\") " pod="openstack/heat-engine-b4696fd89-fd5qp" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.779129 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a04d457-423e-463d-8ea9-35d085150af5-config-data\") pod \"heat-cfnapi-6b64fd586c-rsg7v\" (UID: \"8a04d457-423e-463d-8ea9-35d085150af5\") " pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.779288 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8a04d457-423e-463d-8ea9-35d085150af5-internal-tls-certs\") pod \"heat-cfnapi-6b64fd586c-rsg7v\" (UID: \"8a04d457-423e-463d-8ea9-35d085150af5\") " 
pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.779428 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daa63fbd-a80c-4690-b49c-e402cb6b3c69-combined-ca-bundle\") pod \"heat-api-7b9d6d4567-h9q74\" (UID: \"daa63fbd-a80c-4690-b49c-e402cb6b3c69\") " pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.779580 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqtgf\" (UniqueName: \"kubernetes.io/projected/daa63fbd-a80c-4690-b49c-e402cb6b3c69-kube-api-access-fqtgf\") pod \"heat-api-7b9d6d4567-h9q74\" (UID: \"daa63fbd-a80c-4690-b49c-e402cb6b3c69\") " pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.779720 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/daa63fbd-a80c-4690-b49c-e402cb6b3c69-public-tls-certs\") pod \"heat-api-7b9d6d4567-h9q74\" (UID: \"daa63fbd-a80c-4690-b49c-e402cb6b3c69\") " pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.779838 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a04d457-423e-463d-8ea9-35d085150af5-combined-ca-bundle\") pod \"heat-cfnapi-6b64fd586c-rsg7v\" (UID: \"8a04d457-423e-463d-8ea9-35d085150af5\") " pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.779990 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a04d457-423e-463d-8ea9-35d085150af5-config-data-custom\") pod \"heat-cfnapi-6b64fd586c-rsg7v\" (UID: \"8a04d457-423e-463d-8ea9-35d085150af5\") " pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.780118 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/71066830-9639-4b66-b1c2-cbbc8eb2a821-config-data-custom\") pod \"heat-engine-b4696fd89-fd5qp\" (UID: \"71066830-9639-4b66-b1c2-cbbc8eb2a821\") " pod="openstack/heat-engine-b4696fd89-fd5qp" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.780240 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dfwt2\" (UniqueName: \"kubernetes.io/projected/71066830-9639-4b66-b1c2-cbbc8eb2a821-kube-api-access-dfwt2\") pod \"heat-engine-b4696fd89-fd5qp\" (UID: \"71066830-9639-4b66-b1c2-cbbc8eb2a821\") " pod="openstack/heat-engine-b4696fd89-fd5qp" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.780370 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxr8v\" (UniqueName: \"kubernetes.io/projected/8a04d457-423e-463d-8ea9-35d085150af5-kube-api-access-kxr8v\") pod \"heat-cfnapi-6b64fd586c-rsg7v\" (UID: \"8a04d457-423e-463d-8ea9-35d085150af5\") " pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.780454 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/daa63fbd-a80c-4690-b49c-e402cb6b3c69-config-data\") pod \"heat-api-7b9d6d4567-h9q74\" (UID: \"daa63fbd-a80c-4690-b49c-e402cb6b3c69\") " pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.780640 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8a04d457-423e-463d-8ea9-35d085150af5-public-tls-certs\") pod \"heat-cfnapi-6b64fd586c-rsg7v\" (UID: \"8a04d457-423e-463d-8ea9-35d085150af5\") " pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.780824 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/daa63fbd-a80c-4690-b49c-e402cb6b3c69-config-data-custom\") pod \"heat-api-7b9d6d4567-h9q74\" (UID: \"daa63fbd-a80c-4690-b49c-e402cb6b3c69\") " pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.780933 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/daa63fbd-a80c-4690-b49c-e402cb6b3c69-internal-tls-certs\") pod \"heat-api-7b9d6d4567-h9q74\" (UID: \"daa63fbd-a80c-4690-b49c-e402cb6b3c69\") " pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.781068 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71066830-9639-4b66-b1c2-cbbc8eb2a821-config-data\") pod \"heat-engine-b4696fd89-fd5qp\" (UID: \"71066830-9639-4b66-b1c2-cbbc8eb2a821\") " pod="openstack/heat-engine-b4696fd89-fd5qp" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.785988 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/71066830-9639-4b66-b1c2-cbbc8eb2a821-config-data-custom\") pod \"heat-engine-b4696fd89-fd5qp\" (UID: \"71066830-9639-4b66-b1c2-cbbc8eb2a821\") " pod="openstack/heat-engine-b4696fd89-fd5qp" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.792343 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71066830-9639-4b66-b1c2-cbbc8eb2a821-config-data\") pod \"heat-engine-b4696fd89-fd5qp\" (UID: \"71066830-9639-4b66-b1c2-cbbc8eb2a821\") " pod="openstack/heat-engine-b4696fd89-fd5qp" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.794601 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71066830-9639-4b66-b1c2-cbbc8eb2a821-combined-ca-bundle\") pod \"heat-engine-b4696fd89-fd5qp\" (UID: \"71066830-9639-4b66-b1c2-cbbc8eb2a821\") " pod="openstack/heat-engine-b4696fd89-fd5qp" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.796099 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dfwt2\" (UniqueName: \"kubernetes.io/projected/71066830-9639-4b66-b1c2-cbbc8eb2a821-kube-api-access-dfwt2\") pod \"heat-engine-b4696fd89-fd5qp\" (UID: \"71066830-9639-4b66-b1c2-cbbc8eb2a821\") " pod="openstack/heat-engine-b4696fd89-fd5qp" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.883175 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxr8v\" (UniqueName: 
\"kubernetes.io/projected/8a04d457-423e-463d-8ea9-35d085150af5-kube-api-access-kxr8v\") pod \"heat-cfnapi-6b64fd586c-rsg7v\" (UID: \"8a04d457-423e-463d-8ea9-35d085150af5\") " pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.883235 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/daa63fbd-a80c-4690-b49c-e402cb6b3c69-config-data\") pod \"heat-api-7b9d6d4567-h9q74\" (UID: \"daa63fbd-a80c-4690-b49c-e402cb6b3c69\") " pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.883277 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8a04d457-423e-463d-8ea9-35d085150af5-public-tls-certs\") pod \"heat-cfnapi-6b64fd586c-rsg7v\" (UID: \"8a04d457-423e-463d-8ea9-35d085150af5\") " pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.883332 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/daa63fbd-a80c-4690-b49c-e402cb6b3c69-config-data-custom\") pod \"heat-api-7b9d6d4567-h9q74\" (UID: \"daa63fbd-a80c-4690-b49c-e402cb6b3c69\") " pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.883356 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/daa63fbd-a80c-4690-b49c-e402cb6b3c69-internal-tls-certs\") pod \"heat-api-7b9d6d4567-h9q74\" (UID: \"daa63fbd-a80c-4690-b49c-e402cb6b3c69\") " pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.883457 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a04d457-423e-463d-8ea9-35d085150af5-config-data\") pod \"heat-cfnapi-6b64fd586c-rsg7v\" (UID: \"8a04d457-423e-463d-8ea9-35d085150af5\") " pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.883485 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8a04d457-423e-463d-8ea9-35d085150af5-internal-tls-certs\") pod \"heat-cfnapi-6b64fd586c-rsg7v\" (UID: \"8a04d457-423e-463d-8ea9-35d085150af5\") " pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.883524 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daa63fbd-a80c-4690-b49c-e402cb6b3c69-combined-ca-bundle\") pod \"heat-api-7b9d6d4567-h9q74\" (UID: \"daa63fbd-a80c-4690-b49c-e402cb6b3c69\") " pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.883551 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqtgf\" (UniqueName: \"kubernetes.io/projected/daa63fbd-a80c-4690-b49c-e402cb6b3c69-kube-api-access-fqtgf\") pod \"heat-api-7b9d6d4567-h9q74\" (UID: \"daa63fbd-a80c-4690-b49c-e402cb6b3c69\") " pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.883582 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/daa63fbd-a80c-4690-b49c-e402cb6b3c69-public-tls-certs\") pod \"heat-api-7b9d6d4567-h9q74\" (UID: \"daa63fbd-a80c-4690-b49c-e402cb6b3c69\") " pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.883604 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a04d457-423e-463d-8ea9-35d085150af5-combined-ca-bundle\") pod \"heat-cfnapi-6b64fd586c-rsg7v\" (UID: \"8a04d457-423e-463d-8ea9-35d085150af5\") " pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.883656 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a04d457-423e-463d-8ea9-35d085150af5-config-data-custom\") pod \"heat-cfnapi-6b64fd586c-rsg7v\" (UID: \"8a04d457-423e-463d-8ea9-35d085150af5\") " pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.887747 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/daa63fbd-a80c-4690-b49c-e402cb6b3c69-config-data-custom\") pod \"heat-api-7b9d6d4567-h9q74\" (UID: \"daa63fbd-a80c-4690-b49c-e402cb6b3c69\") " pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.888218 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/daa63fbd-a80c-4690-b49c-e402cb6b3c69-internal-tls-certs\") pod \"heat-api-7b9d6d4567-h9q74\" (UID: \"daa63fbd-a80c-4690-b49c-e402cb6b3c69\") " pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.891528 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/daa63fbd-a80c-4690-b49c-e402cb6b3c69-config-data\") pod \"heat-api-7b9d6d4567-h9q74\" (UID: \"daa63fbd-a80c-4690-b49c-e402cb6b3c69\") " pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.892081 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daa63fbd-a80c-4690-b49c-e402cb6b3c69-combined-ca-bundle\") pod \"heat-api-7b9d6d4567-h9q74\" (UID: \"daa63fbd-a80c-4690-b49c-e402cb6b3c69\") " pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.896905 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a04d457-423e-463d-8ea9-35d085150af5-combined-ca-bundle\") pod \"heat-cfnapi-6b64fd586c-rsg7v\" (UID: \"8a04d457-423e-463d-8ea9-35d085150af5\") " pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.896977 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/daa63fbd-a80c-4690-b49c-e402cb6b3c69-public-tls-certs\") pod \"heat-api-7b9d6d4567-h9q74\" (UID: \"daa63fbd-a80c-4690-b49c-e402cb6b3c69\") " pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.897361 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a04d457-423e-463d-8ea9-35d085150af5-config-data-custom\") pod \"heat-cfnapi-6b64fd586c-rsg7v\" 
(UID: \"8a04d457-423e-463d-8ea9-35d085150af5\") " pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.897915 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8a04d457-423e-463d-8ea9-35d085150af5-public-tls-certs\") pod \"heat-cfnapi-6b64fd586c-rsg7v\" (UID: \"8a04d457-423e-463d-8ea9-35d085150af5\") " pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.899961 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8a04d457-423e-463d-8ea9-35d085150af5-internal-tls-certs\") pod \"heat-cfnapi-6b64fd586c-rsg7v\" (UID: \"8a04d457-423e-463d-8ea9-35d085150af5\") " pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.907353 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqtgf\" (UniqueName: \"kubernetes.io/projected/daa63fbd-a80c-4690-b49c-e402cb6b3c69-kube-api-access-fqtgf\") pod \"heat-api-7b9d6d4567-h9q74\" (UID: \"daa63fbd-a80c-4690-b49c-e402cb6b3c69\") " pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.909100 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a04d457-423e-463d-8ea9-35d085150af5-config-data\") pod \"heat-cfnapi-6b64fd586c-rsg7v\" (UID: \"8a04d457-423e-463d-8ea9-35d085150af5\") " pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.909824 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxr8v\" (UniqueName: \"kubernetes.io/projected/8a04d457-423e-463d-8ea9-35d085150af5-kube-api-access-kxr8v\") pod \"heat-cfnapi-6b64fd586c-rsg7v\" (UID: \"8a04d457-423e-463d-8ea9-35d085150af5\") " pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.928703 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-b4696fd89-fd5qp" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.943352 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:07 crc kubenswrapper[5039]: I1124 13:45:07.978027 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:08 crc kubenswrapper[5039]: I1124 13:45:08.517066 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 13:45:08 crc kubenswrapper[5039]: I1124 13:45:08.549337 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-7b9d6d4567-h9q74"] Nov 24 13:45:08 crc kubenswrapper[5039]: I1124 13:45:08.636775 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-b4696fd89-fd5qp"] Nov 24 13:45:08 crc kubenswrapper[5039]: I1124 13:45:08.656764 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-wdmkf"] Nov 24 13:45:08 crc kubenswrapper[5039]: I1124 13:45:08.656982 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-68df85789f-wdmkf" podUID="3fd60732-1a85-4f00-88f8-5db8885ff122" containerName="dnsmasq-dns" containerID="cri-o://46af6a36325ad7c160e9e794036dc49e142bf2ca167cdbfaf5999e10fd086e62" gracePeriod=10 Nov 24 13:45:08 crc kubenswrapper[5039]: I1124 13:45:08.688804 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-6b64fd586c-rsg7v"] Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.108858 5039 generic.go:334] "Generic (PLEG): container finished" podID="3fd60732-1a85-4f00-88f8-5db8885ff122" containerID="46af6a36325ad7c160e9e794036dc49e142bf2ca167cdbfaf5999e10fd086e62" exitCode=0 Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.108922 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-wdmkf" event={"ID":"3fd60732-1a85-4f00-88f8-5db8885ff122","Type":"ContainerDied","Data":"46af6a36325ad7c160e9e794036dc49e142bf2ca167cdbfaf5999e10fd086e62"} Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.113765 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-b4696fd89-fd5qp" event={"ID":"71066830-9639-4b66-b1c2-cbbc8eb2a821","Type":"ContainerStarted","Data":"ede6d8625b27a8915c0826b60b09a39e38a0049045f06bfc9dae510ded5168b3"} Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.113817 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-b4696fd89-fd5qp" event={"ID":"71066830-9639-4b66-b1c2-cbbc8eb2a821","Type":"ContainerStarted","Data":"641f3fa83d4fa5a3c2c87100ffca02f1015b3dfa101aeb46506aa770ea88c151"} Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.122879 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7b9d6d4567-h9q74" event={"ID":"daa63fbd-a80c-4690-b49c-e402cb6b3c69","Type":"ContainerStarted","Data":"8647847a3c0cb9c8a81e834cfd86f3d2af839ec45e129588d44cf08be8f126d1"} Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.127497 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" event={"ID":"8a04d457-423e-463d-8ea9-35d085150af5","Type":"ContainerStarted","Data":"af043b00717674110d39024ea440c707c99d145519005701b99d75160ff499e4"} Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.308331 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.427824 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-openstack-edpm-ipam\") pod \"3fd60732-1a85-4f00-88f8-5db8885ff122\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.427885 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-config\") pod \"3fd60732-1a85-4f00-88f8-5db8885ff122\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.427943 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-dns-svc\") pod \"3fd60732-1a85-4f00-88f8-5db8885ff122\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.427981 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-dns-swift-storage-0\") pod \"3fd60732-1a85-4f00-88f8-5db8885ff122\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.428013 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-ovsdbserver-sb\") pod \"3fd60732-1a85-4f00-88f8-5db8885ff122\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.428102 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-ovsdbserver-nb\") pod \"3fd60732-1a85-4f00-88f8-5db8885ff122\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.428170 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mpkq7\" (UniqueName: \"kubernetes.io/projected/3fd60732-1a85-4f00-88f8-5db8885ff122-kube-api-access-mpkq7\") pod \"3fd60732-1a85-4f00-88f8-5db8885ff122\" (UID: \"3fd60732-1a85-4f00-88f8-5db8885ff122\") " Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.437772 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fd60732-1a85-4f00-88f8-5db8885ff122-kube-api-access-mpkq7" (OuterVolumeSpecName: "kube-api-access-mpkq7") pod "3fd60732-1a85-4f00-88f8-5db8885ff122" (UID: "3fd60732-1a85-4f00-88f8-5db8885ff122"). InnerVolumeSpecName "kube-api-access-mpkq7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.532111 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mpkq7\" (UniqueName: \"kubernetes.io/projected/3fd60732-1a85-4f00-88f8-5db8885ff122-kube-api-access-mpkq7\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.562647 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3fd60732-1a85-4f00-88f8-5db8885ff122" (UID: "3fd60732-1a85-4f00-88f8-5db8885ff122"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.566471 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3fd60732-1a85-4f00-88f8-5db8885ff122" (UID: "3fd60732-1a85-4f00-88f8-5db8885ff122"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.587183 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3fd60732-1a85-4f00-88f8-5db8885ff122" (UID: "3fd60732-1a85-4f00-88f8-5db8885ff122"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.593922 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-config" (OuterVolumeSpecName: "config") pod "3fd60732-1a85-4f00-88f8-5db8885ff122" (UID: "3fd60732-1a85-4f00-88f8-5db8885ff122"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.635108 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-config\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.635143 5039 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.635153 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.635162 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.642077 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "3fd60732-1a85-4f00-88f8-5db8885ff122" (UID: "3fd60732-1a85-4f00-88f8-5db8885ff122"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.657447 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "3fd60732-1a85-4f00-88f8-5db8885ff122" (UID: "3fd60732-1a85-4f00-88f8-5db8885ff122"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.745755 5039 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:09 crc kubenswrapper[5039]: I1124 13:45:09.746074 5039 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3fd60732-1a85-4f00-88f8-5db8885ff122-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:10 crc kubenswrapper[5039]: I1124 13:45:10.141852 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-wdmkf" Nov 24 13:45:10 crc kubenswrapper[5039]: I1124 13:45:10.141950 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-wdmkf" event={"ID":"3fd60732-1a85-4f00-88f8-5db8885ff122","Type":"ContainerDied","Data":"0fda0cc1ec8f970ac65ecd9de3cb931f62fd4de6c819bb8c5cb21d1b50748902"} Nov 24 13:45:10 crc kubenswrapper[5039]: I1124 13:45:10.142253 5039 scope.go:117] "RemoveContainer" containerID="46af6a36325ad7c160e9e794036dc49e142bf2ca167cdbfaf5999e10fd086e62" Nov 24 13:45:10 crc kubenswrapper[5039]: I1124 13:45:10.142492 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-b4696fd89-fd5qp" Nov 24 13:45:10 crc kubenswrapper[5039]: I1124 13:45:10.172755 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-b4696fd89-fd5qp" podStartSLOduration=3.172732232 podStartE2EDuration="3.172732232s" podCreationTimestamp="2025-11-24 13:45:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:45:10.161902716 +0000 UTC m=+1622.601027226" watchObservedRunningTime="2025-11-24 13:45:10.172732232 +0000 UTC m=+1622.611856742" Nov 24 13:45:10 crc kubenswrapper[5039]: I1124 13:45:10.189747 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-wdmkf"] Nov 24 13:45:10 crc kubenswrapper[5039]: I1124 13:45:10.199643 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-wdmkf"] Nov 24 13:45:10 crc kubenswrapper[5039]: I1124 13:45:10.321244 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3fd60732-1a85-4f00-88f8-5db8885ff122" path="/var/lib/kubelet/pods/3fd60732-1a85-4f00-88f8-5db8885ff122/volumes" Nov 24 13:45:10 crc kubenswrapper[5039]: I1124 13:45:10.406551 5039 scope.go:117] "RemoveContainer" containerID="ed3da603eaaf797f23b6b0dd76a329c69607dc69e356b95948106c53e0128589" Nov 24 13:45:12 crc kubenswrapper[5039]: I1124 13:45:12.237994 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" 
event={"ID":"8a04d457-423e-463d-8ea9-35d085150af5","Type":"ContainerStarted","Data":"88304c8686dd6921ccc0662cd4010ee73e7676a71f9039e8abc7504c58efd196"} Nov 24 13:45:12 crc kubenswrapper[5039]: I1124 13:45:12.238378 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:12 crc kubenswrapper[5039]: I1124 13:45:12.240608 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7b9d6d4567-h9q74" event={"ID":"daa63fbd-a80c-4690-b49c-e402cb6b3c69","Type":"ContainerStarted","Data":"ed24b5b7ed4a66d9f91996f8f1f0df8108ac4497de515e018f937bbc306fe786"} Nov 24 13:45:12 crc kubenswrapper[5039]: I1124 13:45:12.241187 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:12 crc kubenswrapper[5039]: I1124 13:45:12.267378 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" podStartSLOduration=2.900496472 podStartE2EDuration="5.267359325s" podCreationTimestamp="2025-11-24 13:45:07 +0000 UTC" firstStartedPulling="2025-11-24 13:45:08.700026007 +0000 UTC m=+1621.139150507" lastFinishedPulling="2025-11-24 13:45:11.06688886 +0000 UTC m=+1623.506013360" observedRunningTime="2025-11-24 13:45:12.259624585 +0000 UTC m=+1624.698749115" watchObservedRunningTime="2025-11-24 13:45:12.267359325 +0000 UTC m=+1624.706483825" Nov 24 13:45:12 crc kubenswrapper[5039]: I1124 13:45:12.293223 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-7b9d6d4567-h9q74" podStartSLOduration=2.8114964110000003 podStartE2EDuration="5.293200358s" podCreationTimestamp="2025-11-24 13:45:07 +0000 UTC" firstStartedPulling="2025-11-24 13:45:08.584995769 +0000 UTC m=+1621.024120269" lastFinishedPulling="2025-11-24 13:45:11.066699706 +0000 UTC m=+1623.505824216" observedRunningTime="2025-11-24 13:45:12.284271489 +0000 UTC m=+1624.723395989" watchObservedRunningTime="2025-11-24 13:45:12.293200358 +0000 UTC m=+1624.732324858" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.404951 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg"] Nov 24 13:45:17 crc kubenswrapper[5039]: E1124 13:45:17.406282 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3fd60732-1a85-4f00-88f8-5db8885ff122" containerName="init" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.406305 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="3fd60732-1a85-4f00-88f8-5db8885ff122" containerName="init" Nov 24 13:45:17 crc kubenswrapper[5039]: E1124 13:45:17.406333 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3fd60732-1a85-4f00-88f8-5db8885ff122" containerName="dnsmasq-dns" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.406345 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="3fd60732-1a85-4f00-88f8-5db8885ff122" containerName="dnsmasq-dns" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.406675 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="3fd60732-1a85-4f00-88f8-5db8885ff122" containerName="dnsmasq-dns" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.407932 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.409699 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.409959 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.410570 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.421707 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.452587 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg"] Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.522267 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6da7bf95-9494-43c0-be01-d2170fe36b61-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-475lg\" (UID: \"6da7bf95-9494-43c0-be01-d2170fe36b61\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.522421 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6da7bf95-9494-43c0-be01-d2170fe36b61-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-475lg\" (UID: \"6da7bf95-9494-43c0-be01-d2170fe36b61\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.522567 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6da7bf95-9494-43c0-be01-d2170fe36b61-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-475lg\" (UID: \"6da7bf95-9494-43c0-be01-d2170fe36b61\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.522611 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhzh7\" (UniqueName: \"kubernetes.io/projected/6da7bf95-9494-43c0-be01-d2170fe36b61-kube-api-access-dhzh7\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-475lg\" (UID: \"6da7bf95-9494-43c0-be01-d2170fe36b61\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.624414 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6da7bf95-9494-43c0-be01-d2170fe36b61-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-475lg\" (UID: \"6da7bf95-9494-43c0-be01-d2170fe36b61\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.624591 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6da7bf95-9494-43c0-be01-d2170fe36b61-inventory\") pod 
\"repo-setup-edpm-deployment-openstack-edpm-ipam-475lg\" (UID: \"6da7bf95-9494-43c0-be01-d2170fe36b61\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.624627 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhzh7\" (UniqueName: \"kubernetes.io/projected/6da7bf95-9494-43c0-be01-d2170fe36b61-kube-api-access-dhzh7\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-475lg\" (UID: \"6da7bf95-9494-43c0-be01-d2170fe36b61\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.625799 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6da7bf95-9494-43c0-be01-d2170fe36b61-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-475lg\" (UID: \"6da7bf95-9494-43c0-be01-d2170fe36b61\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.633067 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6da7bf95-9494-43c0-be01-d2170fe36b61-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-475lg\" (UID: \"6da7bf95-9494-43c0-be01-d2170fe36b61\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.633305 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6da7bf95-9494-43c0-be01-d2170fe36b61-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-475lg\" (UID: \"6da7bf95-9494-43c0-be01-d2170fe36b61\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.639812 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhzh7\" (UniqueName: \"kubernetes.io/projected/6da7bf95-9494-43c0-be01-d2170fe36b61-kube-api-access-dhzh7\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-475lg\" (UID: \"6da7bf95-9494-43c0-be01-d2170fe36b61\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.643168 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6da7bf95-9494-43c0-be01-d2170fe36b61-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-475lg\" (UID: \"6da7bf95-9494-43c0-be01-d2170fe36b61\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" Nov 24 13:45:17 crc kubenswrapper[5039]: I1124 13:45:17.763648 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" Nov 24 13:45:18 crc kubenswrapper[5039]: I1124 13:45:18.480347 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg"] Nov 24 13:45:19 crc kubenswrapper[5039]: I1124 13:45:19.268860 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-7b9d6d4567-h9q74" Nov 24 13:45:19 crc kubenswrapper[5039]: I1124 13:45:19.321146 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-78c587fb4d-fl6qr"] Nov 24 13:45:19 crc kubenswrapper[5039]: I1124 13:45:19.321392 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-78c587fb4d-fl6qr" podUID="d505257c-0bc2-427b-8f9a-e5333460f461" containerName="heat-api" containerID="cri-o://424a59cbb122a5e7c7f6841bd3dd37f12af72b94bf4b781403646bc495cdd742" gracePeriod=60 Nov 24 13:45:19 crc kubenswrapper[5039]: I1124 13:45:19.348768 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" event={"ID":"6da7bf95-9494-43c0-be01-d2170fe36b61","Type":"ContainerStarted","Data":"aedbf150535116808adfac8e3d183e735f1b79119a183ec93b3d19dc12c5e5ea"} Nov 24 13:45:19 crc kubenswrapper[5039]: I1124 13:45:19.522489 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-6b64fd586c-rsg7v" Nov 24 13:45:19 crc kubenswrapper[5039]: I1124 13:45:19.586538 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7f45f46b76-fckwv"] Nov 24 13:45:19 crc kubenswrapper[5039]: I1124 13:45:19.586761 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-7f45f46b76-fckwv" podUID="88f5edac-dd13-4a09-97a0-60f263e60f23" containerName="heat-cfnapi" containerID="cri-o://68de4bb0f4ad138e9ed720c1fa24cc278b351b0acb32b25e87bc7dec30cfd042" gracePeriod=60 Nov 24 13:45:20 crc kubenswrapper[5039]: I1124 13:45:20.307262 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:45:20 crc kubenswrapper[5039]: E1124 13:45:20.307595 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:45:21 crc kubenswrapper[5039]: I1124 13:45:21.388722 5039 generic.go:334] "Generic (PLEG): container finished" podID="c2b248b0-d5b6-4800-9f0a-915f03d73696" containerID="6d0424f5b11772df023db286e37acf11ffeb7121aac6159494a360a6f71cf940" exitCode=0 Nov 24 13:45:21 crc kubenswrapper[5039]: I1124 13:45:21.388842 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c2b248b0-d5b6-4800-9f0a-915f03d73696","Type":"ContainerDied","Data":"6d0424f5b11772df023db286e37acf11ffeb7121aac6159494a360a6f71cf940"} Nov 24 13:45:21 crc kubenswrapper[5039]: I1124 13:45:21.391751 5039 generic.go:334] "Generic (PLEG): container finished" podID="b820e90e-779c-4300-b0e0-affe5118e73f" containerID="666cb50e3cd5bf82688f2dddbb37d8be99f6462371f801198859bdcb23a8c40d" exitCode=0 Nov 24 13:45:21 crc kubenswrapper[5039]: 
I1124 13:45:21.391784 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b820e90e-779c-4300-b0e0-affe5118e73f","Type":"ContainerDied","Data":"666cb50e3cd5bf82688f2dddbb37d8be99f6462371f801198859bdcb23a8c40d"} Nov 24 13:45:22 crc kubenswrapper[5039]: I1124 13:45:22.493444 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-78c587fb4d-fl6qr" podUID="d505257c-0bc2-427b-8f9a-e5333460f461" containerName="heat-api" probeResult="failure" output="Get \"https://10.217.0.203:8004/healthcheck\": read tcp 10.217.0.2:42434->10.217.0.203:8004: read: connection reset by peer" Nov 24 13:45:22 crc kubenswrapper[5039]: I1124 13:45:22.722982 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-7f45f46b76-fckwv" podUID="88f5edac-dd13-4a09-97a0-60f263e60f23" containerName="heat-cfnapi" probeResult="failure" output="Get \"https://10.217.0.204:8000/healthcheck\": read tcp 10.217.0.2:41726->10.217.0.204:8000: read: connection reset by peer" Nov 24 13:45:23 crc kubenswrapper[5039]: I1124 13:45:23.417206 5039 generic.go:334] "Generic (PLEG): container finished" podID="d505257c-0bc2-427b-8f9a-e5333460f461" containerID="424a59cbb122a5e7c7f6841bd3dd37f12af72b94bf4b781403646bc495cdd742" exitCode=0 Nov 24 13:45:23 crc kubenswrapper[5039]: I1124 13:45:23.417280 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-78c587fb4d-fl6qr" event={"ID":"d505257c-0bc2-427b-8f9a-e5333460f461","Type":"ContainerDied","Data":"424a59cbb122a5e7c7f6841bd3dd37f12af72b94bf4b781403646bc495cdd742"} Nov 24 13:45:23 crc kubenswrapper[5039]: I1124 13:45:23.419696 5039 generic.go:334] "Generic (PLEG): container finished" podID="88f5edac-dd13-4a09-97a0-60f263e60f23" containerID="68de4bb0f4ad138e9ed720c1fa24cc278b351b0acb32b25e87bc7dec30cfd042" exitCode=0 Nov 24 13:45:23 crc kubenswrapper[5039]: I1124 13:45:23.419739 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7f45f46b76-fckwv" event={"ID":"88f5edac-dd13-4a09-97a0-60f263e60f23","Type":"ContainerDied","Data":"68de4bb0f4ad138e9ed720c1fa24cc278b351b0acb32b25e87bc7dec30cfd042"} Nov 24 13:45:24 crc kubenswrapper[5039]: I1124 13:45:24.721113 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-78c587fb4d-fl6qr" podUID="d505257c-0bc2-427b-8f9a-e5333460f461" containerName="heat-api" probeResult="failure" output="Get \"https://10.217.0.203:8004/healthcheck\": dial tcp 10.217.0.203:8004: connect: connection refused" Nov 24 13:45:24 crc kubenswrapper[5039]: I1124 13:45:24.739404 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-7f45f46b76-fckwv" podUID="88f5edac-dd13-4a09-97a0-60f263e60f23" containerName="heat-cfnapi" probeResult="failure" output="Get \"https://10.217.0.204:8000/healthcheck\": dial tcp 10.217.0.204:8000: connect: connection refused" Nov 24 13:45:27 crc kubenswrapper[5039]: I1124 13:45:27.827405 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7f45f46b76-fckwv" Nov 24 13:45:27 crc kubenswrapper[5039]: I1124 13:45:27.892612 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-78c587fb4d-fl6qr" Nov 24 13:45:27 crc kubenswrapper[5039]: I1124 13:45:27.985203 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-internal-tls-certs\") pod \"d505257c-0bc2-427b-8f9a-e5333460f461\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " Nov 24 13:45:27 crc kubenswrapper[5039]: I1124 13:45:27.985268 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-combined-ca-bundle\") pod \"88f5edac-dd13-4a09-97a0-60f263e60f23\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " Nov 24 13:45:27 crc kubenswrapper[5039]: I1124 13:45:27.985305 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-config-data\") pod \"d505257c-0bc2-427b-8f9a-e5333460f461\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " Nov 24 13:45:27 crc kubenswrapper[5039]: I1124 13:45:27.985340 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-combined-ca-bundle\") pod \"d505257c-0bc2-427b-8f9a-e5333460f461\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " Nov 24 13:45:27 crc kubenswrapper[5039]: I1124 13:45:27.985404 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-config-data-custom\") pod \"d505257c-0bc2-427b-8f9a-e5333460f461\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " Nov 24 13:45:27 crc kubenswrapper[5039]: I1124 13:45:27.985430 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-public-tls-certs\") pod \"88f5edac-dd13-4a09-97a0-60f263e60f23\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " Nov 24 13:45:27 crc kubenswrapper[5039]: I1124 13:45:27.985472 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-public-tls-certs\") pod \"d505257c-0bc2-427b-8f9a-e5333460f461\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " Nov 24 13:45:27 crc kubenswrapper[5039]: I1124 13:45:27.985521 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dlwdp\" (UniqueName: \"kubernetes.io/projected/d505257c-0bc2-427b-8f9a-e5333460f461-kube-api-access-dlwdp\") pod \"d505257c-0bc2-427b-8f9a-e5333460f461\" (UID: \"d505257c-0bc2-427b-8f9a-e5333460f461\") " Nov 24 13:45:27 crc kubenswrapper[5039]: I1124 13:45:27.985601 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bchxx\" (UniqueName: \"kubernetes.io/projected/88f5edac-dd13-4a09-97a0-60f263e60f23-kube-api-access-bchxx\") pod \"88f5edac-dd13-4a09-97a0-60f263e60f23\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " Nov 24 13:45:27 crc kubenswrapper[5039]: I1124 13:45:27.985650 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-config-data-custom\") pod \"88f5edac-dd13-4a09-97a0-60f263e60f23\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " Nov 24 13:45:27 crc kubenswrapper[5039]: I1124 13:45:27.985685 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-internal-tls-certs\") pod \"88f5edac-dd13-4a09-97a0-60f263e60f23\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " Nov 24 13:45:27 crc kubenswrapper[5039]: I1124 13:45:27.985715 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-config-data\") pod \"88f5edac-dd13-4a09-97a0-60f263e60f23\" (UID: \"88f5edac-dd13-4a09-97a0-60f263e60f23\") " Nov 24 13:45:27 crc kubenswrapper[5039]: I1124 13:45:27.990278 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-b4696fd89-fd5qp" Nov 24 13:45:27 crc kubenswrapper[5039]: I1124 13:45:27.992630 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d505257c-0bc2-427b-8f9a-e5333460f461-kube-api-access-dlwdp" (OuterVolumeSpecName: "kube-api-access-dlwdp") pod "d505257c-0bc2-427b-8f9a-e5333460f461" (UID: "d505257c-0bc2-427b-8f9a-e5333460f461"). InnerVolumeSpecName "kube-api-access-dlwdp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.005621 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "88f5edac-dd13-4a09-97a0-60f263e60f23" (UID: "88f5edac-dd13-4a09-97a0-60f263e60f23"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.007195 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d505257c-0bc2-427b-8f9a-e5333460f461" (UID: "d505257c-0bc2-427b-8f9a-e5333460f461"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.011338 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88f5edac-dd13-4a09-97a0-60f263e60f23-kube-api-access-bchxx" (OuterVolumeSpecName: "kube-api-access-bchxx") pod "88f5edac-dd13-4a09-97a0-60f263e60f23" (UID: "88f5edac-dd13-4a09-97a0-60f263e60f23"). InnerVolumeSpecName "kube-api-access-bchxx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.076138 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "88f5edac-dd13-4a09-97a0-60f263e60f23" (UID: "88f5edac-dd13-4a09-97a0-60f263e60f23"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.078968 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-75cf4567b8-dlwgp"] Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.079215 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-75cf4567b8-dlwgp" podUID="ed94ecdc-8218-45f3-b908-7a2410b57196" containerName="heat-engine" containerID="cri-o://1fec2c123f26a5e34dff68769cbf1db41e66f70dea0ceeb0bd524545dedee7b4" gracePeriod=60 Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.080753 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "d505257c-0bc2-427b-8f9a-e5333460f461" (UID: "d505257c-0bc2-427b-8f9a-e5333460f461"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.089241 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.089282 5039 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.089294 5039 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.089307 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dlwdp\" (UniqueName: \"kubernetes.io/projected/d505257c-0bc2-427b-8f9a-e5333460f461-kube-api-access-dlwdp\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.089321 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bchxx\" (UniqueName: \"kubernetes.io/projected/88f5edac-dd13-4a09-97a0-60f263e60f23-kube-api-access-bchxx\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.089332 5039 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.119701 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-config-data" (OuterVolumeSpecName: "config-data") pod "d505257c-0bc2-427b-8f9a-e5333460f461" (UID: "d505257c-0bc2-427b-8f9a-e5333460f461"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.130682 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "88f5edac-dd13-4a09-97a0-60f263e60f23" (UID: "88f5edac-dd13-4a09-97a0-60f263e60f23"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.142133 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "88f5edac-dd13-4a09-97a0-60f263e60f23" (UID: "88f5edac-dd13-4a09-97a0-60f263e60f23"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.142488 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d505257c-0bc2-427b-8f9a-e5333460f461" (UID: "d505257c-0bc2-427b-8f9a-e5333460f461"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.163683 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-config-data" (OuterVolumeSpecName: "config-data") pod "88f5edac-dd13-4a09-97a0-60f263e60f23" (UID: "88f5edac-dd13-4a09-97a0-60f263e60f23"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.164065 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "d505257c-0bc2-427b-8f9a-e5333460f461" (UID: "d505257c-0bc2-427b-8f9a-e5333460f461"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.191466 5039 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.191713 5039 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.191776 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88f5edac-dd13-4a09-97a0-60f263e60f23-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.191828 5039 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.191891 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.191951 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d505257c-0bc2-427b-8f9a-e5333460f461-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.471413 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/heat-api-78c587fb4d-fl6qr" event={"ID":"d505257c-0bc2-427b-8f9a-e5333460f461","Type":"ContainerDied","Data":"f3d86bd90a0ba7915d5ab7d03f0b5471c33a79b941f3525024485ce5e0107975"} Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.471465 5039 scope.go:117] "RemoveContainer" containerID="424a59cbb122a5e7c7f6841bd3dd37f12af72b94bf4b781403646bc495cdd742" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.471476 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-78c587fb4d-fl6qr" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.477293 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" event={"ID":"6da7bf95-9494-43c0-be01-d2170fe36b61","Type":"ContainerStarted","Data":"dfbd8717f8130fcbd0047bc6b44c0ceb4ee31d3c805868dd0686a1ad76ed3b11"} Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.481477 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c2b248b0-d5b6-4800-9f0a-915f03d73696","Type":"ContainerStarted","Data":"7fadd120f36cfd900bca73fcae671cf3ec74c0c4f200b56b3469c28f9d9d3fc1"} Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.481780 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.488615 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7f45f46b76-fckwv" event={"ID":"88f5edac-dd13-4a09-97a0-60f263e60f23","Type":"ContainerDied","Data":"36b5746515e23e1fb92ebea5cdd7aab51f484e14e7ef0b4df1bcd43acff19c90"} Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.488644 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-7f45f46b76-fckwv" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.490663 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b820e90e-779c-4300-b0e0-affe5118e73f","Type":"ContainerStarted","Data":"5ae87b7a32674cbe10d2bdaf505528dfdac167ef49bfdef6a604ed25843d640a"} Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.490959 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.504847 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" podStartSLOduration=2.476919417 podStartE2EDuration="11.504831131s" podCreationTimestamp="2025-11-24 13:45:17 +0000 UTC" firstStartedPulling="2025-11-24 13:45:18.475385826 +0000 UTC m=+1630.914510326" lastFinishedPulling="2025-11-24 13:45:27.50329754 +0000 UTC m=+1639.942422040" observedRunningTime="2025-11-24 13:45:28.500634968 +0000 UTC m=+1640.939759468" watchObservedRunningTime="2025-11-24 13:45:28.504831131 +0000 UTC m=+1640.943955631" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.546559 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=52.546537663 podStartE2EDuration="52.546537663s" podCreationTimestamp="2025-11-24 13:44:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:45:28.526586913 +0000 UTC m=+1640.965711433" watchObservedRunningTime="2025-11-24 13:45:28.546537663 +0000 UTC m=+1640.985662163" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.547573 5039 scope.go:117] "RemoveContainer" containerID="68de4bb0f4ad138e9ed720c1fa24cc278b351b0acb32b25e87bc7dec30cfd042" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.595219 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=55.595189714 podStartE2EDuration="55.595189714s" podCreationTimestamp="2025-11-24 13:44:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:45:28.576169859 +0000 UTC m=+1641.015294379" watchObservedRunningTime="2025-11-24 13:45:28.595189714 +0000 UTC m=+1641.034314214" Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.636895 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-78c587fb4d-fl6qr"] Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.659251 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-78c587fb4d-fl6qr"] Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.692723 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7f45f46b76-fckwv"] Nov 24 13:45:28 crc kubenswrapper[5039]: I1124 13:45:28.708080 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-7f45f46b76-fckwv"] Nov 24 13:45:30 crc kubenswrapper[5039]: I1124 13:45:30.319436 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88f5edac-dd13-4a09-97a0-60f263e60f23" path="/var/lib/kubelet/pods/88f5edac-dd13-4a09-97a0-60f263e60f23/volumes" Nov 24 13:45:30 crc kubenswrapper[5039]: I1124 13:45:30.320025 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="d505257c-0bc2-427b-8f9a-e5333460f461" path="/var/lib/kubelet/pods/d505257c-0bc2-427b-8f9a-e5333460f461/volumes" Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.319346 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-c49v2"] Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.324956 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-c49v2"] Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.420718 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-g5jcx"] Nov 24 13:45:34 crc kubenswrapper[5039]: E1124 13:45:34.421347 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d505257c-0bc2-427b-8f9a-e5333460f461" containerName="heat-api" Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.421453 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="d505257c-0bc2-427b-8f9a-e5333460f461" containerName="heat-api" Nov 24 13:45:34 crc kubenswrapper[5039]: E1124 13:45:34.421566 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88f5edac-dd13-4a09-97a0-60f263e60f23" containerName="heat-cfnapi" Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.421931 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="88f5edac-dd13-4a09-97a0-60f263e60f23" containerName="heat-cfnapi" Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.422240 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="d505257c-0bc2-427b-8f9a-e5333460f461" containerName="heat-api" Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.422354 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="88f5edac-dd13-4a09-97a0-60f263e60f23" containerName="heat-cfnapi" Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.423423 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-g5jcx" Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.426668 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.448804 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-g5jcx"] Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.517490 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8q7n5\" (UniqueName: \"kubernetes.io/projected/831424c5-4dbf-4e75-871a-be5c0c7d64a2-kube-api-access-8q7n5\") pod \"aodh-db-sync-g5jcx\" (UID: \"831424c5-4dbf-4e75-871a-be5c0c7d64a2\") " pod="openstack/aodh-db-sync-g5jcx" Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.518071 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/831424c5-4dbf-4e75-871a-be5c0c7d64a2-config-data\") pod \"aodh-db-sync-g5jcx\" (UID: \"831424c5-4dbf-4e75-871a-be5c0c7d64a2\") " pod="openstack/aodh-db-sync-g5jcx" Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.518218 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/831424c5-4dbf-4e75-871a-be5c0c7d64a2-scripts\") pod \"aodh-db-sync-g5jcx\" (UID: \"831424c5-4dbf-4e75-871a-be5c0c7d64a2\") " pod="openstack/aodh-db-sync-g5jcx" Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.518469 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/831424c5-4dbf-4e75-871a-be5c0c7d64a2-combined-ca-bundle\") pod \"aodh-db-sync-g5jcx\" (UID: \"831424c5-4dbf-4e75-871a-be5c0c7d64a2\") " pod="openstack/aodh-db-sync-g5jcx" Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.620626 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/831424c5-4dbf-4e75-871a-be5c0c7d64a2-combined-ca-bundle\") pod \"aodh-db-sync-g5jcx\" (UID: \"831424c5-4dbf-4e75-871a-be5c0c7d64a2\") " pod="openstack/aodh-db-sync-g5jcx" Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.620946 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8q7n5\" (UniqueName: \"kubernetes.io/projected/831424c5-4dbf-4e75-871a-be5c0c7d64a2-kube-api-access-8q7n5\") pod \"aodh-db-sync-g5jcx\" (UID: \"831424c5-4dbf-4e75-871a-be5c0c7d64a2\") " pod="openstack/aodh-db-sync-g5jcx" Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.621039 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/831424c5-4dbf-4e75-871a-be5c0c7d64a2-config-data\") pod \"aodh-db-sync-g5jcx\" (UID: \"831424c5-4dbf-4e75-871a-be5c0c7d64a2\") " pod="openstack/aodh-db-sync-g5jcx" Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.621135 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/831424c5-4dbf-4e75-871a-be5c0c7d64a2-scripts\") pod \"aodh-db-sync-g5jcx\" (UID: \"831424c5-4dbf-4e75-871a-be5c0c7d64a2\") " pod="openstack/aodh-db-sync-g5jcx" Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.627168 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/831424c5-4dbf-4e75-871a-be5c0c7d64a2-scripts\") pod \"aodh-db-sync-g5jcx\" (UID: \"831424c5-4dbf-4e75-871a-be5c0c7d64a2\") " pod="openstack/aodh-db-sync-g5jcx" Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.627588 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/831424c5-4dbf-4e75-871a-be5c0c7d64a2-config-data\") pod \"aodh-db-sync-g5jcx\" (UID: \"831424c5-4dbf-4e75-871a-be5c0c7d64a2\") " pod="openstack/aodh-db-sync-g5jcx" Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.628893 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/831424c5-4dbf-4e75-871a-be5c0c7d64a2-combined-ca-bundle\") pod \"aodh-db-sync-g5jcx\" (UID: \"831424c5-4dbf-4e75-871a-be5c0c7d64a2\") " pod="openstack/aodh-db-sync-g5jcx" Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.639339 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8q7n5\" (UniqueName: \"kubernetes.io/projected/831424c5-4dbf-4e75-871a-be5c0c7d64a2-kube-api-access-8q7n5\") pod \"aodh-db-sync-g5jcx\" (UID: \"831424c5-4dbf-4e75-871a-be5c0c7d64a2\") " pod="openstack/aodh-db-sync-g5jcx" Nov 24 13:45:34 crc kubenswrapper[5039]: I1124 13:45:34.746466 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-g5jcx" Nov 24 13:45:35 crc kubenswrapper[5039]: I1124 13:45:35.307263 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:45:35 crc kubenswrapper[5039]: E1124 13:45:35.307718 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:45:36 crc kubenswrapper[5039]: I1124 13:45:36.320787 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0bbaa038-035d-4e44-ace2-4ac374ccc28a" path="/var/lib/kubelet/pods/0bbaa038-035d-4e44-ace2-4ac374ccc28a/volumes" Nov 24 13:45:36 crc kubenswrapper[5039]: E1124 13:45:36.470768 5039 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1fec2c123f26a5e34dff68769cbf1db41e66f70dea0ceeb0bd524545dedee7b4" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 24 13:45:36 crc kubenswrapper[5039]: E1124 13:45:36.493946 5039 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1fec2c123f26a5e34dff68769cbf1db41e66f70dea0ceeb0bd524545dedee7b4" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 24 13:45:36 crc kubenswrapper[5039]: E1124 13:45:36.498672 5039 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1fec2c123f26a5e34dff68769cbf1db41e66f70dea0ceeb0bd524545dedee7b4" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 24 13:45:36 
crc kubenswrapper[5039]: E1124 13:45:36.498738 5039 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-75cf4567b8-dlwgp" podUID="ed94ecdc-8218-45f3-b908-7a2410b57196" containerName="heat-engine" Nov 24 13:45:36 crc kubenswrapper[5039]: I1124 13:45:36.881214 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-g5jcx"] Nov 24 13:45:37 crc kubenswrapper[5039]: I1124 13:45:37.027175 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="c2b248b0-d5b6-4800-9f0a-915f03d73696" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.255:5671: connect: connection refused" Nov 24 13:45:37 crc kubenswrapper[5039]: I1124 13:45:37.614459 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-g5jcx" event={"ID":"831424c5-4dbf-4e75-871a-be5c0c7d64a2","Type":"ContainerStarted","Data":"d283c7de95e7568abc09709088e0a0bbd791492c1b4fecfc5e8cbbf48573e280"} Nov 24 13:45:38 crc kubenswrapper[5039]: I1124 13:45:38.627171 5039 generic.go:334] "Generic (PLEG): container finished" podID="6da7bf95-9494-43c0-be01-d2170fe36b61" containerID="dfbd8717f8130fcbd0047bc6b44c0ceb4ee31d3c805868dd0686a1ad76ed3b11" exitCode=0 Nov 24 13:45:38 crc kubenswrapper[5039]: I1124 13:45:38.627214 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" event={"ID":"6da7bf95-9494-43c0-be01-d2170fe36b61","Type":"ContainerDied","Data":"dfbd8717f8130fcbd0047bc6b44c0ceb4ee31d3c805868dd0686a1ad76ed3b11"} Nov 24 13:45:42 crc kubenswrapper[5039]: I1124 13:45:42.520042 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" Nov 24 13:45:42 crc kubenswrapper[5039]: I1124 13:45:42.675824 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" event={"ID":"6da7bf95-9494-43c0-be01-d2170fe36b61","Type":"ContainerDied","Data":"aedbf150535116808adfac8e3d183e735f1b79119a183ec93b3d19dc12c5e5ea"} Nov 24 13:45:42 crc kubenswrapper[5039]: I1124 13:45:42.675869 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aedbf150535116808adfac8e3d183e735f1b79119a183ec93b3d19dc12c5e5ea" Nov 24 13:45:42 crc kubenswrapper[5039]: I1124 13:45:42.675887 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg" Nov 24 13:45:42 crc kubenswrapper[5039]: I1124 13:45:42.705323 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dhzh7\" (UniqueName: \"kubernetes.io/projected/6da7bf95-9494-43c0-be01-d2170fe36b61-kube-api-access-dhzh7\") pod \"6da7bf95-9494-43c0-be01-d2170fe36b61\" (UID: \"6da7bf95-9494-43c0-be01-d2170fe36b61\") " Nov 24 13:45:42 crc kubenswrapper[5039]: I1124 13:45:42.705685 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6da7bf95-9494-43c0-be01-d2170fe36b61-inventory\") pod \"6da7bf95-9494-43c0-be01-d2170fe36b61\" (UID: \"6da7bf95-9494-43c0-be01-d2170fe36b61\") " Nov 24 13:45:42 crc kubenswrapper[5039]: I1124 13:45:42.705751 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6da7bf95-9494-43c0-be01-d2170fe36b61-repo-setup-combined-ca-bundle\") pod \"6da7bf95-9494-43c0-be01-d2170fe36b61\" (UID: \"6da7bf95-9494-43c0-be01-d2170fe36b61\") " Nov 24 13:45:42 crc kubenswrapper[5039]: I1124 13:45:42.705792 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6da7bf95-9494-43c0-be01-d2170fe36b61-ssh-key\") pod \"6da7bf95-9494-43c0-be01-d2170fe36b61\" (UID: \"6da7bf95-9494-43c0-be01-d2170fe36b61\") " Nov 24 13:45:42 crc kubenswrapper[5039]: I1124 13:45:42.710890 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6da7bf95-9494-43c0-be01-d2170fe36b61-kube-api-access-dhzh7" (OuterVolumeSpecName: "kube-api-access-dhzh7") pod "6da7bf95-9494-43c0-be01-d2170fe36b61" (UID: "6da7bf95-9494-43c0-be01-d2170fe36b61"). InnerVolumeSpecName "kube-api-access-dhzh7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:45:42 crc kubenswrapper[5039]: I1124 13:45:42.722153 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6da7bf95-9494-43c0-be01-d2170fe36b61-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "6da7bf95-9494-43c0-be01-d2170fe36b61" (UID: "6da7bf95-9494-43c0-be01-d2170fe36b61"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:42 crc kubenswrapper[5039]: I1124 13:45:42.743964 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6da7bf95-9494-43c0-be01-d2170fe36b61-inventory" (OuterVolumeSpecName: "inventory") pod "6da7bf95-9494-43c0-be01-d2170fe36b61" (UID: "6da7bf95-9494-43c0-be01-d2170fe36b61"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:42 crc kubenswrapper[5039]: I1124 13:45:42.768067 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6da7bf95-9494-43c0-be01-d2170fe36b61-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6da7bf95-9494-43c0-be01-d2170fe36b61" (UID: "6da7bf95-9494-43c0-be01-d2170fe36b61"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:42 crc kubenswrapper[5039]: I1124 13:45:42.808296 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6da7bf95-9494-43c0-be01-d2170fe36b61-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:42 crc kubenswrapper[5039]: I1124 13:45:42.808325 5039 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6da7bf95-9494-43c0-be01-d2170fe36b61-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:42 crc kubenswrapper[5039]: I1124 13:45:42.808336 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6da7bf95-9494-43c0-be01-d2170fe36b61-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:42 crc kubenswrapper[5039]: I1124 13:45:42.808347 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dhzh7\" (UniqueName: \"kubernetes.io/projected/6da7bf95-9494-43c0-be01-d2170fe36b61-kube-api-access-dhzh7\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.612604 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd"] Nov 24 13:45:43 crc kubenswrapper[5039]: E1124 13:45:43.613685 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6da7bf95-9494-43c0-be01-d2170fe36b61" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.613710 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="6da7bf95-9494-43c0-be01-d2170fe36b61" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.614115 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="6da7bf95-9494-43c0-be01-d2170fe36b61" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.615349 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.618941 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.621000 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.621177 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.621317 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.622384 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd"] Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.686266 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-g5jcx" event={"ID":"831424c5-4dbf-4e75-871a-be5c0c7d64a2","Type":"ContainerStarted","Data":"4bc284743209699ddfbf620b48a7ffbb657c3c40f2a03c998f1c814e65c4e3eb"} Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.711473 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-g5jcx" podStartSLOduration=3.49213411 podStartE2EDuration="9.711456118s" podCreationTimestamp="2025-11-24 13:45:34 +0000 UTC" firstStartedPulling="2025-11-24 13:45:36.89260324 +0000 UTC m=+1649.331727740" lastFinishedPulling="2025-11-24 13:45:43.111925248 +0000 UTC m=+1655.551049748" observedRunningTime="2025-11-24 13:45:43.701301849 +0000 UTC m=+1656.140426349" watchObservedRunningTime="2025-11-24 13:45:43.711456118 +0000 UTC m=+1656.150580618" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.730298 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a59c2a20-4a1f-4b68-aec2-5e2005f42418-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd\" (UID: \"a59c2a20-4a1f-4b68-aec2-5e2005f42418\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.730379 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a59c2a20-4a1f-4b68-aec2-5e2005f42418-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd\" (UID: \"a59c2a20-4a1f-4b68-aec2-5e2005f42418\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.730417 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a59c2a20-4a1f-4b68-aec2-5e2005f42418-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd\" (UID: \"a59c2a20-4a1f-4b68-aec2-5e2005f42418\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.730712 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pdl7\" (UniqueName: 
\"kubernetes.io/projected/a59c2a20-4a1f-4b68-aec2-5e2005f42418-kube-api-access-7pdl7\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd\" (UID: \"a59c2a20-4a1f-4b68-aec2-5e2005f42418\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.833180 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pdl7\" (UniqueName: \"kubernetes.io/projected/a59c2a20-4a1f-4b68-aec2-5e2005f42418-kube-api-access-7pdl7\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd\" (UID: \"a59c2a20-4a1f-4b68-aec2-5e2005f42418\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.833294 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a59c2a20-4a1f-4b68-aec2-5e2005f42418-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd\" (UID: \"a59c2a20-4a1f-4b68-aec2-5e2005f42418\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.833347 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a59c2a20-4a1f-4b68-aec2-5e2005f42418-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd\" (UID: \"a59c2a20-4a1f-4b68-aec2-5e2005f42418\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.833395 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a59c2a20-4a1f-4b68-aec2-5e2005f42418-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd\" (UID: \"a59c2a20-4a1f-4b68-aec2-5e2005f42418\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.839786 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a59c2a20-4a1f-4b68-aec2-5e2005f42418-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd\" (UID: \"a59c2a20-4a1f-4b68-aec2-5e2005f42418\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.839871 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a59c2a20-4a1f-4b68-aec2-5e2005f42418-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd\" (UID: \"a59c2a20-4a1f-4b68-aec2-5e2005f42418\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.841228 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a59c2a20-4a1f-4b68-aec2-5e2005f42418-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd\" (UID: \"a59c2a20-4a1f-4b68-aec2-5e2005f42418\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.854428 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pdl7\" (UniqueName: \"kubernetes.io/projected/a59c2a20-4a1f-4b68-aec2-5e2005f42418-kube-api-access-7pdl7\") 
pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd\" (UID: \"a59c2a20-4a1f-4b68-aec2-5e2005f42418\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" Nov 24 13:45:43 crc kubenswrapper[5039]: I1124 13:45:43.937167 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" Nov 24 13:45:44 crc kubenswrapper[5039]: I1124 13:45:44.382461 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 24 13:45:44 crc kubenswrapper[5039]: I1124 13:45:44.559647 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd"] Nov 24 13:45:44 crc kubenswrapper[5039]: I1124 13:45:44.698791 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" event={"ID":"a59c2a20-4a1f-4b68-aec2-5e2005f42418","Type":"ContainerStarted","Data":"fde5ff6f630fb2f142d05eff20078350d6054eddb531527296168c24a71cf2bc"} Nov 24 13:45:45 crc kubenswrapper[5039]: I1124 13:45:45.709254 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" event={"ID":"a59c2a20-4a1f-4b68-aec2-5e2005f42418","Type":"ContainerStarted","Data":"db2d40d56cbbf1c2559b007c38085278c481fa083c28d2a37ab39f26753af100"} Nov 24 13:45:45 crc kubenswrapper[5039]: I1124 13:45:45.713264 5039 generic.go:334] "Generic (PLEG): container finished" podID="ed94ecdc-8218-45f3-b908-7a2410b57196" containerID="1fec2c123f26a5e34dff68769cbf1db41e66f70dea0ceeb0bd524545dedee7b4" exitCode=0 Nov 24 13:45:45 crc kubenswrapper[5039]: I1124 13:45:45.713311 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-75cf4567b8-dlwgp" event={"ID":"ed94ecdc-8218-45f3-b908-7a2410b57196","Type":"ContainerDied","Data":"1fec2c123f26a5e34dff68769cbf1db41e66f70dea0ceeb0bd524545dedee7b4"} Nov 24 13:45:45 crc kubenswrapper[5039]: I1124 13:45:45.730949 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" podStartSLOduration=2.303475496 podStartE2EDuration="2.73093145s" podCreationTimestamp="2025-11-24 13:45:43 +0000 UTC" firstStartedPulling="2025-11-24 13:45:44.565300449 +0000 UTC m=+1657.004424949" lastFinishedPulling="2025-11-24 13:45:44.992756403 +0000 UTC m=+1657.431880903" observedRunningTime="2025-11-24 13:45:45.725168639 +0000 UTC m=+1658.164293139" watchObservedRunningTime="2025-11-24 13:45:45.73093145 +0000 UTC m=+1658.170055950" Nov 24 13:45:45 crc kubenswrapper[5039]: I1124 13:45:45.896356 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-75cf4567b8-dlwgp" Nov 24 13:45:45 crc kubenswrapper[5039]: I1124 13:45:45.992763 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed94ecdc-8218-45f3-b908-7a2410b57196-combined-ca-bundle\") pod \"ed94ecdc-8218-45f3-b908-7a2410b57196\" (UID: \"ed94ecdc-8218-45f3-b908-7a2410b57196\") " Nov 24 13:45:45 crc kubenswrapper[5039]: I1124 13:45:45.992810 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfc8f\" (UniqueName: \"kubernetes.io/projected/ed94ecdc-8218-45f3-b908-7a2410b57196-kube-api-access-cfc8f\") pod \"ed94ecdc-8218-45f3-b908-7a2410b57196\" (UID: \"ed94ecdc-8218-45f3-b908-7a2410b57196\") " Nov 24 13:45:45 crc kubenswrapper[5039]: I1124 13:45:45.992922 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ed94ecdc-8218-45f3-b908-7a2410b57196-config-data-custom\") pod \"ed94ecdc-8218-45f3-b908-7a2410b57196\" (UID: \"ed94ecdc-8218-45f3-b908-7a2410b57196\") " Nov 24 13:45:45 crc kubenswrapper[5039]: I1124 13:45:45.993038 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed94ecdc-8218-45f3-b908-7a2410b57196-config-data\") pod \"ed94ecdc-8218-45f3-b908-7a2410b57196\" (UID: \"ed94ecdc-8218-45f3-b908-7a2410b57196\") " Nov 24 13:45:46 crc kubenswrapper[5039]: I1124 13:45:45.999106 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed94ecdc-8218-45f3-b908-7a2410b57196-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ed94ecdc-8218-45f3-b908-7a2410b57196" (UID: "ed94ecdc-8218-45f3-b908-7a2410b57196"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:46 crc kubenswrapper[5039]: I1124 13:45:46.006894 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed94ecdc-8218-45f3-b908-7a2410b57196-kube-api-access-cfc8f" (OuterVolumeSpecName: "kube-api-access-cfc8f") pod "ed94ecdc-8218-45f3-b908-7a2410b57196" (UID: "ed94ecdc-8218-45f3-b908-7a2410b57196"). InnerVolumeSpecName "kube-api-access-cfc8f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:45:46 crc kubenswrapper[5039]: I1124 13:45:46.077718 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed94ecdc-8218-45f3-b908-7a2410b57196-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ed94ecdc-8218-45f3-b908-7a2410b57196" (UID: "ed94ecdc-8218-45f3-b908-7a2410b57196"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:46 crc kubenswrapper[5039]: I1124 13:45:46.110968 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed94ecdc-8218-45f3-b908-7a2410b57196-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:46 crc kubenswrapper[5039]: I1124 13:45:46.111003 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfc8f\" (UniqueName: \"kubernetes.io/projected/ed94ecdc-8218-45f3-b908-7a2410b57196-kube-api-access-cfc8f\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:46 crc kubenswrapper[5039]: I1124 13:45:46.111018 5039 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ed94ecdc-8218-45f3-b908-7a2410b57196-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:46 crc kubenswrapper[5039]: I1124 13:45:46.124658 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed94ecdc-8218-45f3-b908-7a2410b57196-config-data" (OuterVolumeSpecName: "config-data") pod "ed94ecdc-8218-45f3-b908-7a2410b57196" (UID: "ed94ecdc-8218-45f3-b908-7a2410b57196"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:46 crc kubenswrapper[5039]: I1124 13:45:46.213383 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed94ecdc-8218-45f3-b908-7a2410b57196-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:46 crc kubenswrapper[5039]: I1124 13:45:46.726862 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-75cf4567b8-dlwgp" event={"ID":"ed94ecdc-8218-45f3-b908-7a2410b57196","Type":"ContainerDied","Data":"f250f7b576ac512a61366258108b02a5141b007c70cd0f3416cc175caf55df66"} Nov 24 13:45:46 crc kubenswrapper[5039]: I1124 13:45:46.726918 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-75cf4567b8-dlwgp" Nov 24 13:45:46 crc kubenswrapper[5039]: I1124 13:45:46.727282 5039 scope.go:117] "RemoveContainer" containerID="1fec2c123f26a5e34dff68769cbf1db41e66f70dea0ceeb0bd524545dedee7b4" Nov 24 13:45:46 crc kubenswrapper[5039]: I1124 13:45:46.729202 5039 generic.go:334] "Generic (PLEG): container finished" podID="831424c5-4dbf-4e75-871a-be5c0c7d64a2" containerID="4bc284743209699ddfbf620b48a7ffbb657c3c40f2a03c998f1c814e65c4e3eb" exitCode=0 Nov 24 13:45:46 crc kubenswrapper[5039]: I1124 13:45:46.729243 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-g5jcx" event={"ID":"831424c5-4dbf-4e75-871a-be5c0c7d64a2","Type":"ContainerDied","Data":"4bc284743209699ddfbf620b48a7ffbb657c3c40f2a03c998f1c814e65c4e3eb"} Nov 24 13:45:46 crc kubenswrapper[5039]: I1124 13:45:46.790576 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-75cf4567b8-dlwgp"] Nov 24 13:45:46 crc kubenswrapper[5039]: I1124 13:45:46.802397 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-75cf4567b8-dlwgp"] Nov 24 13:45:46 crc kubenswrapper[5039]: I1124 13:45:46.971023 5039 scope.go:117] "RemoveContainer" containerID="f7f255a5b5ef739628a977bd9d4f3b8aff132ae39584d5944ef9176a0bc70c5e" Nov 24 13:45:47 crc kubenswrapper[5039]: I1124 13:45:47.002039 5039 scope.go:117] "RemoveContainer" containerID="3ee43750eeed8c71cd0a28ca11d7eb172974b9ffbe34600ac7ee2219070b935b" Nov 24 13:45:47 crc kubenswrapper[5039]: I1124 13:45:47.026716 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 24 13:45:47 crc kubenswrapper[5039]: I1124 13:45:47.307465 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:45:47 crc kubenswrapper[5039]: E1124 13:45:47.307789 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:45:48 crc kubenswrapper[5039]: I1124 13:45:48.160464 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-g5jcx" Nov 24 13:45:48 crc kubenswrapper[5039]: I1124 13:45:48.259695 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8q7n5\" (UniqueName: \"kubernetes.io/projected/831424c5-4dbf-4e75-871a-be5c0c7d64a2-kube-api-access-8q7n5\") pod \"831424c5-4dbf-4e75-871a-be5c0c7d64a2\" (UID: \"831424c5-4dbf-4e75-871a-be5c0c7d64a2\") " Nov 24 13:45:48 crc kubenswrapper[5039]: I1124 13:45:48.259742 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/831424c5-4dbf-4e75-871a-be5c0c7d64a2-combined-ca-bundle\") pod \"831424c5-4dbf-4e75-871a-be5c0c7d64a2\" (UID: \"831424c5-4dbf-4e75-871a-be5c0c7d64a2\") " Nov 24 13:45:48 crc kubenswrapper[5039]: I1124 13:45:48.259814 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/831424c5-4dbf-4e75-871a-be5c0c7d64a2-config-data\") pod \"831424c5-4dbf-4e75-871a-be5c0c7d64a2\" (UID: \"831424c5-4dbf-4e75-871a-be5c0c7d64a2\") " Nov 24 13:45:48 crc kubenswrapper[5039]: I1124 13:45:48.259881 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/831424c5-4dbf-4e75-871a-be5c0c7d64a2-scripts\") pod \"831424c5-4dbf-4e75-871a-be5c0c7d64a2\" (UID: \"831424c5-4dbf-4e75-871a-be5c0c7d64a2\") " Nov 24 13:45:48 crc kubenswrapper[5039]: I1124 13:45:48.267755 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/831424c5-4dbf-4e75-871a-be5c0c7d64a2-kube-api-access-8q7n5" (OuterVolumeSpecName: "kube-api-access-8q7n5") pod "831424c5-4dbf-4e75-871a-be5c0c7d64a2" (UID: "831424c5-4dbf-4e75-871a-be5c0c7d64a2"). InnerVolumeSpecName "kube-api-access-8q7n5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:45:48 crc kubenswrapper[5039]: I1124 13:45:48.269647 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/831424c5-4dbf-4e75-871a-be5c0c7d64a2-scripts" (OuterVolumeSpecName: "scripts") pod "831424c5-4dbf-4e75-871a-be5c0c7d64a2" (UID: "831424c5-4dbf-4e75-871a-be5c0c7d64a2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:48 crc kubenswrapper[5039]: I1124 13:45:48.293238 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/831424c5-4dbf-4e75-871a-be5c0c7d64a2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "831424c5-4dbf-4e75-871a-be5c0c7d64a2" (UID: "831424c5-4dbf-4e75-871a-be5c0c7d64a2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:48 crc kubenswrapper[5039]: I1124 13:45:48.294383 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/831424c5-4dbf-4e75-871a-be5c0c7d64a2-config-data" (OuterVolumeSpecName: "config-data") pod "831424c5-4dbf-4e75-871a-be5c0c7d64a2" (UID: "831424c5-4dbf-4e75-871a-be5c0c7d64a2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:48 crc kubenswrapper[5039]: I1124 13:45:48.324145 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed94ecdc-8218-45f3-b908-7a2410b57196" path="/var/lib/kubelet/pods/ed94ecdc-8218-45f3-b908-7a2410b57196/volumes" Nov 24 13:45:48 crc kubenswrapper[5039]: I1124 13:45:48.362906 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8q7n5\" (UniqueName: \"kubernetes.io/projected/831424c5-4dbf-4e75-871a-be5c0c7d64a2-kube-api-access-8q7n5\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:48 crc kubenswrapper[5039]: I1124 13:45:48.362954 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/831424c5-4dbf-4e75-871a-be5c0c7d64a2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:48 crc kubenswrapper[5039]: I1124 13:45:48.362963 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/831424c5-4dbf-4e75-871a-be5c0c7d64a2-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:48 crc kubenswrapper[5039]: I1124 13:45:48.362972 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/831424c5-4dbf-4e75-871a-be5c0c7d64a2-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:48 crc kubenswrapper[5039]: I1124 13:45:48.751280 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-g5jcx" event={"ID":"831424c5-4dbf-4e75-871a-be5c0c7d64a2","Type":"ContainerDied","Data":"d283c7de95e7568abc09709088e0a0bbd791492c1b4fecfc5e8cbbf48573e280"} Nov 24 13:45:48 crc kubenswrapper[5039]: I1124 13:45:48.751323 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d283c7de95e7568abc09709088e0a0bbd791492c1b4fecfc5e8cbbf48573e280" Nov 24 13:45:48 crc kubenswrapper[5039]: I1124 13:45:48.751362 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-g5jcx" Nov 24 13:45:49 crc kubenswrapper[5039]: I1124 13:45:49.359052 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Nov 24 13:45:49 crc kubenswrapper[5039]: I1124 13:45:49.359669 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" containerName="aodh-api" containerID="cri-o://c63d3b34d01c54e937720dc6e9a1e56385de8b6e7bf96ecf78a68d2aa6b1aa44" gracePeriod=30 Nov 24 13:45:49 crc kubenswrapper[5039]: I1124 13:45:49.359731 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" containerName="aodh-listener" containerID="cri-o://fa3376a8b799f58ed41cfeec6ce65c0643a72b63585966ef7aca2df99a3a8532" gracePeriod=30 Nov 24 13:45:49 crc kubenswrapper[5039]: I1124 13:45:49.359788 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" containerName="aodh-notifier" containerID="cri-o://51f9030c77524c934dd16d9fa87ee58b0f150e4b4496fd77da3e7952a12f856e" gracePeriod=30 Nov 24 13:45:49 crc kubenswrapper[5039]: I1124 13:45:49.359839 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" containerName="aodh-evaluator" containerID="cri-o://5dd3da925ba59309f32c6a17ab10a6d7316c69f7ad5c9695a71c88cff1a1a8f4" gracePeriod=30 Nov 24 13:45:49 crc kubenswrapper[5039]: I1124 13:45:49.764677 5039 generic.go:334] "Generic (PLEG): container finished" podID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" containerID="c63d3b34d01c54e937720dc6e9a1e56385de8b6e7bf96ecf78a68d2aa6b1aa44" exitCode=0 Nov 24 13:45:49 crc kubenswrapper[5039]: I1124 13:45:49.764737 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"cc51777e-d169-47d1-bfe2-006a99d0ba7c","Type":"ContainerDied","Data":"c63d3b34d01c54e937720dc6e9a1e56385de8b6e7bf96ecf78a68d2aa6b1aa44"} Nov 24 13:45:50 crc kubenswrapper[5039]: I1124 13:45:50.776673 5039 generic.go:334] "Generic (PLEG): container finished" podID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" containerID="fa3376a8b799f58ed41cfeec6ce65c0643a72b63585966ef7aca2df99a3a8532" exitCode=0 Nov 24 13:45:50 crc kubenswrapper[5039]: I1124 13:45:50.776976 5039 generic.go:334] "Generic (PLEG): container finished" podID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" containerID="51f9030c77524c934dd16d9fa87ee58b0f150e4b4496fd77da3e7952a12f856e" exitCode=0 Nov 24 13:45:50 crc kubenswrapper[5039]: I1124 13:45:50.776985 5039 generic.go:334] "Generic (PLEG): container finished" podID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" containerID="5dd3da925ba59309f32c6a17ab10a6d7316c69f7ad5c9695a71c88cff1a1a8f4" exitCode=0 Nov 24 13:45:50 crc kubenswrapper[5039]: I1124 13:45:50.776719 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"cc51777e-d169-47d1-bfe2-006a99d0ba7c","Type":"ContainerDied","Data":"fa3376a8b799f58ed41cfeec6ce65c0643a72b63585966ef7aca2df99a3a8532"} Nov 24 13:45:50 crc kubenswrapper[5039]: I1124 13:45:50.777029 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"cc51777e-d169-47d1-bfe2-006a99d0ba7c","Type":"ContainerDied","Data":"51f9030c77524c934dd16d9fa87ee58b0f150e4b4496fd77da3e7952a12f856e"} Nov 24 13:45:50 crc kubenswrapper[5039]: I1124 13:45:50.777042 5039 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/aodh-0" event={"ID":"cc51777e-d169-47d1-bfe2-006a99d0ba7c","Type":"ContainerDied","Data":"5dd3da925ba59309f32c6a17ab10a6d7316c69f7ad5c9695a71c88cff1a1a8f4"} Nov 24 13:45:50 crc kubenswrapper[5039]: I1124 13:45:50.903166 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.017868 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-86c8n\" (UniqueName: \"kubernetes.io/projected/cc51777e-d169-47d1-bfe2-006a99d0ba7c-kube-api-access-86c8n\") pod \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.017982 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-combined-ca-bundle\") pod \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.018017 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-public-tls-certs\") pod \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.018047 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-config-data\") pod \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.018195 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-internal-tls-certs\") pod \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.018268 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-scripts\") pod \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\" (UID: \"cc51777e-d169-47d1-bfe2-006a99d0ba7c\") " Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.024674 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-scripts" (OuterVolumeSpecName: "scripts") pod "cc51777e-d169-47d1-bfe2-006a99d0ba7c" (UID: "cc51777e-d169-47d1-bfe2-006a99d0ba7c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.025883 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc51777e-d169-47d1-bfe2-006a99d0ba7c-kube-api-access-86c8n" (OuterVolumeSpecName: "kube-api-access-86c8n") pod "cc51777e-d169-47d1-bfe2-006a99d0ba7c" (UID: "cc51777e-d169-47d1-bfe2-006a99d0ba7c"). InnerVolumeSpecName "kube-api-access-86c8n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.085454 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "cc51777e-d169-47d1-bfe2-006a99d0ba7c" (UID: "cc51777e-d169-47d1-bfe2-006a99d0ba7c"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.097197 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "cc51777e-d169-47d1-bfe2-006a99d0ba7c" (UID: "cc51777e-d169-47d1-bfe2-006a99d0ba7c"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.123705 5039 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.123772 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.123784 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-86c8n\" (UniqueName: \"kubernetes.io/projected/cc51777e-d169-47d1-bfe2-006a99d0ba7c-kube-api-access-86c8n\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.123796 5039 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.148416 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cc51777e-d169-47d1-bfe2-006a99d0ba7c" (UID: "cc51777e-d169-47d1-bfe2-006a99d0ba7c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.168891 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-config-data" (OuterVolumeSpecName: "config-data") pod "cc51777e-d169-47d1-bfe2-006a99d0ba7c" (UID: "cc51777e-d169-47d1-bfe2-006a99d0ba7c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.226341 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.226377 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc51777e-d169-47d1-bfe2-006a99d0ba7c-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.798681 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"cc51777e-d169-47d1-bfe2-006a99d0ba7c","Type":"ContainerDied","Data":"6cf190ed03996bcb9e0099229df6d350780f4a0e38269f6973ad8e4dd54c92b7"} Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.798757 5039 scope.go:117] "RemoveContainer" containerID="fa3376a8b799f58ed41cfeec6ce65c0643a72b63585966ef7aca2df99a3a8532" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.799010 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.825474 5039 scope.go:117] "RemoveContainer" containerID="51f9030c77524c934dd16d9fa87ee58b0f150e4b4496fd77da3e7952a12f856e" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.847315 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.865855 5039 scope.go:117] "RemoveContainer" containerID="5dd3da925ba59309f32c6a17ab10a6d7316c69f7ad5c9695a71c88cff1a1a8f4" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.867475 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.878265 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 24 13:45:51 crc kubenswrapper[5039]: E1124 13:45:51.878826 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" containerName="aodh-listener" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.878850 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" containerName="aodh-listener" Nov 24 13:45:51 crc kubenswrapper[5039]: E1124 13:45:51.878865 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="831424c5-4dbf-4e75-871a-be5c0c7d64a2" containerName="aodh-db-sync" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.878872 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="831424c5-4dbf-4e75-871a-be5c0c7d64a2" containerName="aodh-db-sync" Nov 24 13:45:51 crc kubenswrapper[5039]: E1124 13:45:51.878888 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" containerName="aodh-evaluator" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.878896 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" containerName="aodh-evaluator" Nov 24 13:45:51 crc kubenswrapper[5039]: E1124 13:45:51.878914 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed94ecdc-8218-45f3-b908-7a2410b57196" containerName="heat-engine" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.878923 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed94ecdc-8218-45f3-b908-7a2410b57196" 
containerName="heat-engine" Nov 24 13:45:51 crc kubenswrapper[5039]: E1124 13:45:51.878934 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" containerName="aodh-notifier" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.878940 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" containerName="aodh-notifier" Nov 24 13:45:51 crc kubenswrapper[5039]: E1124 13:45:51.878963 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" containerName="aodh-api" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.878969 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" containerName="aodh-api" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.879189 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" containerName="aodh-listener" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.879206 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" containerName="aodh-api" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.879226 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" containerName="aodh-evaluator" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.879241 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" containerName="aodh-notifier" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.879258 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="831424c5-4dbf-4e75-871a-be5c0c7d64a2" containerName="aodh-db-sync" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.879275 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed94ecdc-8218-45f3-b908-7a2410b57196" containerName="heat-engine" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.881748 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.885115 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.885307 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.885614 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.885743 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-k6pb7" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.890153 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.896500 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 24 13:45:51 crc kubenswrapper[5039]: I1124 13:45:51.900240 5039 scope.go:117] "RemoveContainer" containerID="c63d3b34d01c54e937720dc6e9a1e56385de8b6e7bf96ecf78a68d2aa6b1aa44" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.042226 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0201ebd4-dc90-4332-b036-38d4d2a1ea2a-config-data\") pod \"aodh-0\" (UID: \"0201ebd4-dc90-4332-b036-38d4d2a1ea2a\") " pod="openstack/aodh-0" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.042296 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0201ebd4-dc90-4332-b036-38d4d2a1ea2a-combined-ca-bundle\") pod \"aodh-0\" (UID: \"0201ebd4-dc90-4332-b036-38d4d2a1ea2a\") " pod="openstack/aodh-0" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.042348 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8kl9p\" (UniqueName: \"kubernetes.io/projected/0201ebd4-dc90-4332-b036-38d4d2a1ea2a-kube-api-access-8kl9p\") pod \"aodh-0\" (UID: \"0201ebd4-dc90-4332-b036-38d4d2a1ea2a\") " pod="openstack/aodh-0" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.042399 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0201ebd4-dc90-4332-b036-38d4d2a1ea2a-scripts\") pod \"aodh-0\" (UID: \"0201ebd4-dc90-4332-b036-38d4d2a1ea2a\") " pod="openstack/aodh-0" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.042753 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0201ebd4-dc90-4332-b036-38d4d2a1ea2a-internal-tls-certs\") pod \"aodh-0\" (UID: \"0201ebd4-dc90-4332-b036-38d4d2a1ea2a\") " pod="openstack/aodh-0" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.042912 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0201ebd4-dc90-4332-b036-38d4d2a1ea2a-public-tls-certs\") pod \"aodh-0\" (UID: \"0201ebd4-dc90-4332-b036-38d4d2a1ea2a\") " pod="openstack/aodh-0" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.144309 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/0201ebd4-dc90-4332-b036-38d4d2a1ea2a-internal-tls-certs\") pod \"aodh-0\" (UID: \"0201ebd4-dc90-4332-b036-38d4d2a1ea2a\") " pod="openstack/aodh-0" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.144404 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0201ebd4-dc90-4332-b036-38d4d2a1ea2a-public-tls-certs\") pod \"aodh-0\" (UID: \"0201ebd4-dc90-4332-b036-38d4d2a1ea2a\") " pod="openstack/aodh-0" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.144513 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0201ebd4-dc90-4332-b036-38d4d2a1ea2a-config-data\") pod \"aodh-0\" (UID: \"0201ebd4-dc90-4332-b036-38d4d2a1ea2a\") " pod="openstack/aodh-0" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.144543 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0201ebd4-dc90-4332-b036-38d4d2a1ea2a-combined-ca-bundle\") pod \"aodh-0\" (UID: \"0201ebd4-dc90-4332-b036-38d4d2a1ea2a\") " pod="openstack/aodh-0" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.144578 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8kl9p\" (UniqueName: \"kubernetes.io/projected/0201ebd4-dc90-4332-b036-38d4d2a1ea2a-kube-api-access-8kl9p\") pod \"aodh-0\" (UID: \"0201ebd4-dc90-4332-b036-38d4d2a1ea2a\") " pod="openstack/aodh-0" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.144603 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0201ebd4-dc90-4332-b036-38d4d2a1ea2a-scripts\") pod \"aodh-0\" (UID: \"0201ebd4-dc90-4332-b036-38d4d2a1ea2a\") " pod="openstack/aodh-0" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.159175 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0201ebd4-dc90-4332-b036-38d4d2a1ea2a-scripts\") pod \"aodh-0\" (UID: \"0201ebd4-dc90-4332-b036-38d4d2a1ea2a\") " pod="openstack/aodh-0" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.159394 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0201ebd4-dc90-4332-b036-38d4d2a1ea2a-public-tls-certs\") pod \"aodh-0\" (UID: \"0201ebd4-dc90-4332-b036-38d4d2a1ea2a\") " pod="openstack/aodh-0" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.159944 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0201ebd4-dc90-4332-b036-38d4d2a1ea2a-combined-ca-bundle\") pod \"aodh-0\" (UID: \"0201ebd4-dc90-4332-b036-38d4d2a1ea2a\") " pod="openstack/aodh-0" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.160061 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0201ebd4-dc90-4332-b036-38d4d2a1ea2a-config-data\") pod \"aodh-0\" (UID: \"0201ebd4-dc90-4332-b036-38d4d2a1ea2a\") " pod="openstack/aodh-0" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.161554 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0201ebd4-dc90-4332-b036-38d4d2a1ea2a-internal-tls-certs\") pod \"aodh-0\" (UID: \"0201ebd4-dc90-4332-b036-38d4d2a1ea2a\") " 
pod="openstack/aodh-0" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.162124 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8kl9p\" (UniqueName: \"kubernetes.io/projected/0201ebd4-dc90-4332-b036-38d4d2a1ea2a-kube-api-access-8kl9p\") pod \"aodh-0\" (UID: \"0201ebd4-dc90-4332-b036-38d4d2a1ea2a\") " pod="openstack/aodh-0" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.212552 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.330195 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc51777e-d169-47d1-bfe2-006a99d0ba7c" path="/var/lib/kubelet/pods/cc51777e-d169-47d1-bfe2-006a99d0ba7c/volumes" Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.664288 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 24 13:45:52 crc kubenswrapper[5039]: I1124 13:45:52.809553 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0201ebd4-dc90-4332-b036-38d4d2a1ea2a","Type":"ContainerStarted","Data":"720cfa6e47fa4fdc71a49d8931ed38f828df2fa098c100e91e6b792aad801a32"} Nov 24 13:45:53 crc kubenswrapper[5039]: I1124 13:45:53.823690 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0201ebd4-dc90-4332-b036-38d4d2a1ea2a","Type":"ContainerStarted","Data":"9f60e76ec66c2a6f31d17f0f70e9194f803ec4f90c0b426d2579c58a3e24d718"} Nov 24 13:45:54 crc kubenswrapper[5039]: I1124 13:45:54.836456 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0201ebd4-dc90-4332-b036-38d4d2a1ea2a","Type":"ContainerStarted","Data":"ee451f63fa217ae30dad949c2f2f9d1cfd2cb8deeac181a6092e7c24add9625e"} Nov 24 13:45:55 crc kubenswrapper[5039]: I1124 13:45:55.852638 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0201ebd4-dc90-4332-b036-38d4d2a1ea2a","Type":"ContainerStarted","Data":"c3e5a9b6083f5e178a5c9d024c30661249ac1bdc59a4cab0f2f8e883fb97dd74"} Nov 24 13:45:56 crc kubenswrapper[5039]: I1124 13:45:56.866214 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0201ebd4-dc90-4332-b036-38d4d2a1ea2a","Type":"ContainerStarted","Data":"1fb5c2e2f35fdb92b5f21be1946bc0c94f6745d2a8b3b7c30400c19797ef87c4"} Nov 24 13:45:56 crc kubenswrapper[5039]: I1124 13:45:56.886151 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.039403405 podStartE2EDuration="5.886128228s" podCreationTimestamp="2025-11-24 13:45:51 +0000 UTC" firstStartedPulling="2025-11-24 13:45:52.674182116 +0000 UTC m=+1665.113306616" lastFinishedPulling="2025-11-24 13:45:56.520906949 +0000 UTC m=+1668.960031439" observedRunningTime="2025-11-24 13:45:56.884013897 +0000 UTC m=+1669.323138417" watchObservedRunningTime="2025-11-24 13:45:56.886128228 +0000 UTC m=+1669.325252768" Nov 24 13:46:01 crc kubenswrapper[5039]: I1124 13:46:01.307898 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:46:01 crc kubenswrapper[5039]: E1124 13:46:01.308863 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:46:14 crc kubenswrapper[5039]: I1124 13:46:14.306547 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:46:14 crc kubenswrapper[5039]: E1124 13:46:14.307417 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:46:29 crc kubenswrapper[5039]: I1124 13:46:29.307724 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:46:29 crc kubenswrapper[5039]: E1124 13:46:29.308593 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:46:41 crc kubenswrapper[5039]: I1124 13:46:41.307011 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:46:41 crc kubenswrapper[5039]: E1124 13:46:41.307816 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:46:47 crc kubenswrapper[5039]: I1124 13:46:47.251234 5039 scope.go:117] "RemoveContainer" containerID="6fac4d240154030f2adbe813a51f23a6104b252d2fe89d53286245577c472cfb" Nov 24 13:46:47 crc kubenswrapper[5039]: I1124 13:46:47.289230 5039 scope.go:117] "RemoveContainer" containerID="b9b4a336c29ad683550561fdb764ff313aac831a347ffd5f562bebbebe1c2365" Nov 24 13:46:53 crc kubenswrapper[5039]: I1124 13:46:53.306537 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:46:53 crc kubenswrapper[5039]: E1124 13:46:53.307365 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:47:07 crc kubenswrapper[5039]: I1124 13:47:07.306489 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:47:07 crc kubenswrapper[5039]: E1124 13:47:07.307305 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:47:22 crc kubenswrapper[5039]: I1124 13:47:22.306468 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:47:22 crc kubenswrapper[5039]: E1124 13:47:22.307241 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:47:35 crc kubenswrapper[5039]: I1124 13:47:35.307314 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:47:35 crc kubenswrapper[5039]: E1124 13:47:35.308122 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:47:46 crc kubenswrapper[5039]: I1124 13:47:46.307104 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:47:46 crc kubenswrapper[5039]: E1124 13:47:46.308333 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:47:50 crc kubenswrapper[5039]: I1124 13:47:50.584406 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-kd2bb"] Nov 24 13:47:50 crc kubenswrapper[5039]: I1124 13:47:50.587121 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kd2bb" Nov 24 13:47:50 crc kubenswrapper[5039]: I1124 13:47:50.615068 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kd2bb"] Nov 24 13:47:50 crc kubenswrapper[5039]: I1124 13:47:50.653754 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ba28fd7-afae-4374-a4e4-3c0d969f8b96-catalog-content\") pod \"redhat-operators-kd2bb\" (UID: \"9ba28fd7-afae-4374-a4e4-3c0d969f8b96\") " pod="openshift-marketplace/redhat-operators-kd2bb" Nov 24 13:47:50 crc kubenswrapper[5039]: I1124 13:47:50.653876 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6h4fk\" (UniqueName: \"kubernetes.io/projected/9ba28fd7-afae-4374-a4e4-3c0d969f8b96-kube-api-access-6h4fk\") pod \"redhat-operators-kd2bb\" (UID: \"9ba28fd7-afae-4374-a4e4-3c0d969f8b96\") " pod="openshift-marketplace/redhat-operators-kd2bb" Nov 24 13:47:50 crc kubenswrapper[5039]: I1124 13:47:50.654177 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ba28fd7-afae-4374-a4e4-3c0d969f8b96-utilities\") pod \"redhat-operators-kd2bb\" (UID: \"9ba28fd7-afae-4374-a4e4-3c0d969f8b96\") " pod="openshift-marketplace/redhat-operators-kd2bb" Nov 24 13:47:50 crc kubenswrapper[5039]: I1124 13:47:50.756063 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ba28fd7-afae-4374-a4e4-3c0d969f8b96-catalog-content\") pod \"redhat-operators-kd2bb\" (UID: \"9ba28fd7-afae-4374-a4e4-3c0d969f8b96\") " pod="openshift-marketplace/redhat-operators-kd2bb" Nov 24 13:47:50 crc kubenswrapper[5039]: I1124 13:47:50.756178 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6h4fk\" (UniqueName: \"kubernetes.io/projected/9ba28fd7-afae-4374-a4e4-3c0d969f8b96-kube-api-access-6h4fk\") pod \"redhat-operators-kd2bb\" (UID: \"9ba28fd7-afae-4374-a4e4-3c0d969f8b96\") " pod="openshift-marketplace/redhat-operators-kd2bb" Nov 24 13:47:50 crc kubenswrapper[5039]: I1124 13:47:50.756330 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ba28fd7-afae-4374-a4e4-3c0d969f8b96-utilities\") pod \"redhat-operators-kd2bb\" (UID: \"9ba28fd7-afae-4374-a4e4-3c0d969f8b96\") " pod="openshift-marketplace/redhat-operators-kd2bb" Nov 24 13:47:50 crc kubenswrapper[5039]: I1124 13:47:50.756637 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ba28fd7-afae-4374-a4e4-3c0d969f8b96-catalog-content\") pod \"redhat-operators-kd2bb\" (UID: \"9ba28fd7-afae-4374-a4e4-3c0d969f8b96\") " pod="openshift-marketplace/redhat-operators-kd2bb" Nov 24 13:47:50 crc kubenswrapper[5039]: I1124 13:47:50.756817 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ba28fd7-afae-4374-a4e4-3c0d969f8b96-utilities\") pod \"redhat-operators-kd2bb\" (UID: \"9ba28fd7-afae-4374-a4e4-3c0d969f8b96\") " pod="openshift-marketplace/redhat-operators-kd2bb" Nov 24 13:47:50 crc kubenswrapper[5039]: I1124 13:47:50.787747 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-6h4fk\" (UniqueName: \"kubernetes.io/projected/9ba28fd7-afae-4374-a4e4-3c0d969f8b96-kube-api-access-6h4fk\") pod \"redhat-operators-kd2bb\" (UID: \"9ba28fd7-afae-4374-a4e4-3c0d969f8b96\") " pod="openshift-marketplace/redhat-operators-kd2bb" Nov 24 13:47:50 crc kubenswrapper[5039]: I1124 13:47:50.934556 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kd2bb" Nov 24 13:47:51 crc kubenswrapper[5039]: I1124 13:47:51.446108 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kd2bb"] Nov 24 13:47:52 crc kubenswrapper[5039]: I1124 13:47:52.152090 5039 generic.go:334] "Generic (PLEG): container finished" podID="9ba28fd7-afae-4374-a4e4-3c0d969f8b96" containerID="2baaf27e5946afc539d5eab9f4631d708b6897adfd4ab3a64f4c8be0c8a6810c" exitCode=0 Nov 24 13:47:52 crc kubenswrapper[5039]: I1124 13:47:52.152162 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kd2bb" event={"ID":"9ba28fd7-afae-4374-a4e4-3c0d969f8b96","Type":"ContainerDied","Data":"2baaf27e5946afc539d5eab9f4631d708b6897adfd4ab3a64f4c8be0c8a6810c"} Nov 24 13:47:52 crc kubenswrapper[5039]: I1124 13:47:52.152444 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kd2bb" event={"ID":"9ba28fd7-afae-4374-a4e4-3c0d969f8b96","Type":"ContainerStarted","Data":"aa5333cd16165d7d3ea15409a51adc04629f219d2615e351229632819f0f6eea"} Nov 24 13:47:53 crc kubenswrapper[5039]: I1124 13:47:53.163217 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kd2bb" event={"ID":"9ba28fd7-afae-4374-a4e4-3c0d969f8b96","Type":"ContainerStarted","Data":"9c779e36f7f424c81d29419e92274abd02abd72a097f839019ceca2d71dc18f0"} Nov 24 13:47:57 crc kubenswrapper[5039]: I1124 13:47:57.216297 5039 generic.go:334] "Generic (PLEG): container finished" podID="9ba28fd7-afae-4374-a4e4-3c0d969f8b96" containerID="9c779e36f7f424c81d29419e92274abd02abd72a097f839019ceca2d71dc18f0" exitCode=0 Nov 24 13:47:57 crc kubenswrapper[5039]: I1124 13:47:57.216420 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kd2bb" event={"ID":"9ba28fd7-afae-4374-a4e4-3c0d969f8b96","Type":"ContainerDied","Data":"9c779e36f7f424c81d29419e92274abd02abd72a097f839019ceca2d71dc18f0"} Nov 24 13:47:57 crc kubenswrapper[5039]: I1124 13:47:57.307580 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:47:57 crc kubenswrapper[5039]: E1124 13:47:57.307898 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:47:58 crc kubenswrapper[5039]: I1124 13:47:58.228640 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kd2bb" event={"ID":"9ba28fd7-afae-4374-a4e4-3c0d969f8b96","Type":"ContainerStarted","Data":"29b0c14f2330b59e0ac450e57fd6c06a0a8f58a5c3d0c82b54569dfc77649fab"} Nov 24 13:47:58 crc kubenswrapper[5039]: I1124 13:47:58.249443 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-operators-kd2bb" podStartSLOduration=2.649310017 podStartE2EDuration="8.24942099s" podCreationTimestamp="2025-11-24 13:47:50 +0000 UTC" firstStartedPulling="2025-11-24 13:47:52.154487216 +0000 UTC m=+1784.593611716" lastFinishedPulling="2025-11-24 13:47:57.754598189 +0000 UTC m=+1790.193722689" observedRunningTime="2025-11-24 13:47:58.243779102 +0000 UTC m=+1790.682903602" watchObservedRunningTime="2025-11-24 13:47:58.24942099 +0000 UTC m=+1790.688545490" Nov 24 13:48:01 crc kubenswrapper[5039]: I1124 13:48:01.777873 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="1dcf47d4-1399-46bb-bda8-5dfeb96a3b60" containerName="galera" probeResult="failure" output="command timed out" Nov 24 13:48:01 crc kubenswrapper[5039]: I1124 13:48:01.778755 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="1dcf47d4-1399-46bb-bda8-5dfeb96a3b60" containerName="galera" probeResult="failure" output="command timed out" Nov 24 13:48:01 crc kubenswrapper[5039]: I1124 13:48:01.796584 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-kd2bb" Nov 24 13:48:01 crc kubenswrapper[5039]: I1124 13:48:01.796613 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-kd2bb" Nov 24 13:48:02 crc kubenswrapper[5039]: I1124 13:48:02.881464 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-kd2bb" podUID="9ba28fd7-afae-4374-a4e4-3c0d969f8b96" containerName="registry-server" probeResult="failure" output=< Nov 24 13:48:02 crc kubenswrapper[5039]: timeout: failed to connect service ":50051" within 1s Nov 24 13:48:02 crc kubenswrapper[5039]: > Nov 24 13:48:10 crc kubenswrapper[5039]: I1124 13:48:10.307565 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:48:10 crc kubenswrapper[5039]: E1124 13:48:10.308347 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:48:10 crc kubenswrapper[5039]: I1124 13:48:10.988575 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-kd2bb" Nov 24 13:48:11 crc kubenswrapper[5039]: I1124 13:48:11.044162 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-kd2bb" Nov 24 13:48:11 crc kubenswrapper[5039]: I1124 13:48:11.221945 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kd2bb"] Nov 24 13:48:12 crc kubenswrapper[5039]: I1124 13:48:12.926933 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-kd2bb" podUID="9ba28fd7-afae-4374-a4e4-3c0d969f8b96" containerName="registry-server" containerID="cri-o://29b0c14f2330b59e0ac450e57fd6c06a0a8f58a5c3d0c82b54569dfc77649fab" gracePeriod=2 Nov 24 13:48:13 crc kubenswrapper[5039]: I1124 13:48:13.391316 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kd2bb" Nov 24 13:48:13 crc kubenswrapper[5039]: I1124 13:48:13.522686 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ba28fd7-afae-4374-a4e4-3c0d969f8b96-utilities\") pod \"9ba28fd7-afae-4374-a4e4-3c0d969f8b96\" (UID: \"9ba28fd7-afae-4374-a4e4-3c0d969f8b96\") " Nov 24 13:48:13 crc kubenswrapper[5039]: I1124 13:48:13.522776 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ba28fd7-afae-4374-a4e4-3c0d969f8b96-catalog-content\") pod \"9ba28fd7-afae-4374-a4e4-3c0d969f8b96\" (UID: \"9ba28fd7-afae-4374-a4e4-3c0d969f8b96\") " Nov 24 13:48:13 crc kubenswrapper[5039]: I1124 13:48:13.522873 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6h4fk\" (UniqueName: \"kubernetes.io/projected/9ba28fd7-afae-4374-a4e4-3c0d969f8b96-kube-api-access-6h4fk\") pod \"9ba28fd7-afae-4374-a4e4-3c0d969f8b96\" (UID: \"9ba28fd7-afae-4374-a4e4-3c0d969f8b96\") " Nov 24 13:48:13 crc kubenswrapper[5039]: I1124 13:48:13.523605 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ba28fd7-afae-4374-a4e4-3c0d969f8b96-utilities" (OuterVolumeSpecName: "utilities") pod "9ba28fd7-afae-4374-a4e4-3c0d969f8b96" (UID: "9ba28fd7-afae-4374-a4e4-3c0d969f8b96"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:48:13 crc kubenswrapper[5039]: I1124 13:48:13.558831 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ba28fd7-afae-4374-a4e4-3c0d969f8b96-kube-api-access-6h4fk" (OuterVolumeSpecName: "kube-api-access-6h4fk") pod "9ba28fd7-afae-4374-a4e4-3c0d969f8b96" (UID: "9ba28fd7-afae-4374-a4e4-3c0d969f8b96"). InnerVolumeSpecName "kube-api-access-6h4fk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:48:13 crc kubenswrapper[5039]: I1124 13:48:13.625153 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ba28fd7-afae-4374-a4e4-3c0d969f8b96-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 13:48:13 crc kubenswrapper[5039]: I1124 13:48:13.625192 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6h4fk\" (UniqueName: \"kubernetes.io/projected/9ba28fd7-afae-4374-a4e4-3c0d969f8b96-kube-api-access-6h4fk\") on node \"crc\" DevicePath \"\"" Nov 24 13:48:13 crc kubenswrapper[5039]: I1124 13:48:13.631126 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ba28fd7-afae-4374-a4e4-3c0d969f8b96-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9ba28fd7-afae-4374-a4e4-3c0d969f8b96" (UID: "9ba28fd7-afae-4374-a4e4-3c0d969f8b96"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:48:13 crc kubenswrapper[5039]: I1124 13:48:13.727651 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ba28fd7-afae-4374-a4e4-3c0d969f8b96-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 13:48:13 crc kubenswrapper[5039]: I1124 13:48:13.940450 5039 generic.go:334] "Generic (PLEG): container finished" podID="9ba28fd7-afae-4374-a4e4-3c0d969f8b96" containerID="29b0c14f2330b59e0ac450e57fd6c06a0a8f58a5c3d0c82b54569dfc77649fab" exitCode=0 Nov 24 13:48:13 crc kubenswrapper[5039]: I1124 13:48:13.940557 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kd2bb" Nov 24 13:48:13 crc kubenswrapper[5039]: I1124 13:48:13.940595 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kd2bb" event={"ID":"9ba28fd7-afae-4374-a4e4-3c0d969f8b96","Type":"ContainerDied","Data":"29b0c14f2330b59e0ac450e57fd6c06a0a8f58a5c3d0c82b54569dfc77649fab"} Nov 24 13:48:13 crc kubenswrapper[5039]: I1124 13:48:13.940637 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kd2bb" event={"ID":"9ba28fd7-afae-4374-a4e4-3c0d969f8b96","Type":"ContainerDied","Data":"aa5333cd16165d7d3ea15409a51adc04629f219d2615e351229632819f0f6eea"} Nov 24 13:48:13 crc kubenswrapper[5039]: I1124 13:48:13.940655 5039 scope.go:117] "RemoveContainer" containerID="29b0c14f2330b59e0ac450e57fd6c06a0a8f58a5c3d0c82b54569dfc77649fab" Nov 24 13:48:13 crc kubenswrapper[5039]: I1124 13:48:13.965660 5039 scope.go:117] "RemoveContainer" containerID="9c779e36f7f424c81d29419e92274abd02abd72a097f839019ceca2d71dc18f0" Nov 24 13:48:13 crc kubenswrapper[5039]: I1124 13:48:13.977463 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kd2bb"] Nov 24 13:48:13 crc kubenswrapper[5039]: I1124 13:48:13.990682 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-kd2bb"] Nov 24 13:48:14 crc kubenswrapper[5039]: I1124 13:48:14.006350 5039 scope.go:117] "RemoveContainer" containerID="2baaf27e5946afc539d5eab9f4631d708b6897adfd4ab3a64f4c8be0c8a6810c" Nov 24 13:48:14 crc kubenswrapper[5039]: I1124 13:48:14.062451 5039 scope.go:117] "RemoveContainer" containerID="29b0c14f2330b59e0ac450e57fd6c06a0a8f58a5c3d0c82b54569dfc77649fab" Nov 24 13:48:14 crc kubenswrapper[5039]: E1124 13:48:14.063946 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29b0c14f2330b59e0ac450e57fd6c06a0a8f58a5c3d0c82b54569dfc77649fab\": container with ID starting with 29b0c14f2330b59e0ac450e57fd6c06a0a8f58a5c3d0c82b54569dfc77649fab not found: ID does not exist" containerID="29b0c14f2330b59e0ac450e57fd6c06a0a8f58a5c3d0c82b54569dfc77649fab" Nov 24 13:48:14 crc kubenswrapper[5039]: I1124 13:48:14.063992 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29b0c14f2330b59e0ac450e57fd6c06a0a8f58a5c3d0c82b54569dfc77649fab"} err="failed to get container status \"29b0c14f2330b59e0ac450e57fd6c06a0a8f58a5c3d0c82b54569dfc77649fab\": rpc error: code = NotFound desc = could not find container \"29b0c14f2330b59e0ac450e57fd6c06a0a8f58a5c3d0c82b54569dfc77649fab\": container with ID starting with 29b0c14f2330b59e0ac450e57fd6c06a0a8f58a5c3d0c82b54569dfc77649fab not found: ID does not exist" Nov 24 13:48:14 crc 
kubenswrapper[5039]: I1124 13:48:14.064020 5039 scope.go:117] "RemoveContainer" containerID="9c779e36f7f424c81d29419e92274abd02abd72a097f839019ceca2d71dc18f0" Nov 24 13:48:14 crc kubenswrapper[5039]: E1124 13:48:14.066034 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c779e36f7f424c81d29419e92274abd02abd72a097f839019ceca2d71dc18f0\": container with ID starting with 9c779e36f7f424c81d29419e92274abd02abd72a097f839019ceca2d71dc18f0 not found: ID does not exist" containerID="9c779e36f7f424c81d29419e92274abd02abd72a097f839019ceca2d71dc18f0" Nov 24 13:48:14 crc kubenswrapper[5039]: I1124 13:48:14.066099 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c779e36f7f424c81d29419e92274abd02abd72a097f839019ceca2d71dc18f0"} err="failed to get container status \"9c779e36f7f424c81d29419e92274abd02abd72a097f839019ceca2d71dc18f0\": rpc error: code = NotFound desc = could not find container \"9c779e36f7f424c81d29419e92274abd02abd72a097f839019ceca2d71dc18f0\": container with ID starting with 9c779e36f7f424c81d29419e92274abd02abd72a097f839019ceca2d71dc18f0 not found: ID does not exist" Nov 24 13:48:14 crc kubenswrapper[5039]: I1124 13:48:14.066155 5039 scope.go:117] "RemoveContainer" containerID="2baaf27e5946afc539d5eab9f4631d708b6897adfd4ab3a64f4c8be0c8a6810c" Nov 24 13:48:14 crc kubenswrapper[5039]: E1124 13:48:14.066585 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2baaf27e5946afc539d5eab9f4631d708b6897adfd4ab3a64f4c8be0c8a6810c\": container with ID starting with 2baaf27e5946afc539d5eab9f4631d708b6897adfd4ab3a64f4c8be0c8a6810c not found: ID does not exist" containerID="2baaf27e5946afc539d5eab9f4631d708b6897adfd4ab3a64f4c8be0c8a6810c" Nov 24 13:48:14 crc kubenswrapper[5039]: I1124 13:48:14.066616 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2baaf27e5946afc539d5eab9f4631d708b6897adfd4ab3a64f4c8be0c8a6810c"} err="failed to get container status \"2baaf27e5946afc539d5eab9f4631d708b6897adfd4ab3a64f4c8be0c8a6810c\": rpc error: code = NotFound desc = could not find container \"2baaf27e5946afc539d5eab9f4631d708b6897adfd4ab3a64f4c8be0c8a6810c\": container with ID starting with 2baaf27e5946afc539d5eab9f4631d708b6897adfd4ab3a64f4c8be0c8a6810c not found: ID does not exist" Nov 24 13:48:14 crc kubenswrapper[5039]: I1124 13:48:14.319924 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ba28fd7-afae-4374-a4e4-3c0d969f8b96" path="/var/lib/kubelet/pods/9ba28fd7-afae-4374-a4e4-3c0d969f8b96/volumes" Nov 24 13:48:23 crc kubenswrapper[5039]: I1124 13:48:23.307915 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133" Nov 24 13:48:24 crc kubenswrapper[5039]: I1124 13:48:24.051440 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"43f9e17fdc829b04a1d158fb340e5b63c9b87b25d3decfdb862bbf4e2559df49"} Nov 24 13:48:38 crc kubenswrapper[5039]: I1124 13:48:38.210333 5039 generic.go:334] "Generic (PLEG): container finished" podID="a59c2a20-4a1f-4b68-aec2-5e2005f42418" containerID="db2d40d56cbbf1c2559b007c38085278c481fa083c28d2a37ab39f26753af100" exitCode=0 Nov 24 13:48:38 crc kubenswrapper[5039]: I1124 13:48:38.210406 5039 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" event={"ID":"a59c2a20-4a1f-4b68-aec2-5e2005f42418","Type":"ContainerDied","Data":"db2d40d56cbbf1c2559b007c38085278c481fa083c28d2a37ab39f26753af100"} Nov 24 13:48:39 crc kubenswrapper[5039]: I1124 13:48:39.772735 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" Nov 24 13:48:39 crc kubenswrapper[5039]: I1124 13:48:39.892994 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a59c2a20-4a1f-4b68-aec2-5e2005f42418-inventory\") pod \"a59c2a20-4a1f-4b68-aec2-5e2005f42418\" (UID: \"a59c2a20-4a1f-4b68-aec2-5e2005f42418\") " Nov 24 13:48:39 crc kubenswrapper[5039]: I1124 13:48:39.893098 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7pdl7\" (UniqueName: \"kubernetes.io/projected/a59c2a20-4a1f-4b68-aec2-5e2005f42418-kube-api-access-7pdl7\") pod \"a59c2a20-4a1f-4b68-aec2-5e2005f42418\" (UID: \"a59c2a20-4a1f-4b68-aec2-5e2005f42418\") " Nov 24 13:48:39 crc kubenswrapper[5039]: I1124 13:48:39.893189 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a59c2a20-4a1f-4b68-aec2-5e2005f42418-ssh-key\") pod \"a59c2a20-4a1f-4b68-aec2-5e2005f42418\" (UID: \"a59c2a20-4a1f-4b68-aec2-5e2005f42418\") " Nov 24 13:48:39 crc kubenswrapper[5039]: I1124 13:48:39.893275 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a59c2a20-4a1f-4b68-aec2-5e2005f42418-bootstrap-combined-ca-bundle\") pod \"a59c2a20-4a1f-4b68-aec2-5e2005f42418\" (UID: \"a59c2a20-4a1f-4b68-aec2-5e2005f42418\") " Nov 24 13:48:39 crc kubenswrapper[5039]: I1124 13:48:39.899584 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a59c2a20-4a1f-4b68-aec2-5e2005f42418-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "a59c2a20-4a1f-4b68-aec2-5e2005f42418" (UID: "a59c2a20-4a1f-4b68-aec2-5e2005f42418"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:48:39 crc kubenswrapper[5039]: I1124 13:48:39.900827 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a59c2a20-4a1f-4b68-aec2-5e2005f42418-kube-api-access-7pdl7" (OuterVolumeSpecName: "kube-api-access-7pdl7") pod "a59c2a20-4a1f-4b68-aec2-5e2005f42418" (UID: "a59c2a20-4a1f-4b68-aec2-5e2005f42418"). InnerVolumeSpecName "kube-api-access-7pdl7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:48:39 crc kubenswrapper[5039]: I1124 13:48:39.931631 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a59c2a20-4a1f-4b68-aec2-5e2005f42418-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a59c2a20-4a1f-4b68-aec2-5e2005f42418" (UID: "a59c2a20-4a1f-4b68-aec2-5e2005f42418"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:48:39 crc kubenswrapper[5039]: I1124 13:48:39.932030 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a59c2a20-4a1f-4b68-aec2-5e2005f42418-inventory" (OuterVolumeSpecName: "inventory") pod "a59c2a20-4a1f-4b68-aec2-5e2005f42418" (UID: "a59c2a20-4a1f-4b68-aec2-5e2005f42418"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:48:39 crc kubenswrapper[5039]: I1124 13:48:39.995811 5039 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a59c2a20-4a1f-4b68-aec2-5e2005f42418-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:48:39 crc kubenswrapper[5039]: I1124 13:48:39.995844 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a59c2a20-4a1f-4b68-aec2-5e2005f42418-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 13:48:39 crc kubenswrapper[5039]: I1124 13:48:39.995853 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7pdl7\" (UniqueName: \"kubernetes.io/projected/a59c2a20-4a1f-4b68-aec2-5e2005f42418-kube-api-access-7pdl7\") on node \"crc\" DevicePath \"\"" Nov 24 13:48:39 crc kubenswrapper[5039]: I1124 13:48:39.995862 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a59c2a20-4a1f-4b68-aec2-5e2005f42418-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.235411 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" event={"ID":"a59c2a20-4a1f-4b68-aec2-5e2005f42418","Type":"ContainerDied","Data":"fde5ff6f630fb2f142d05eff20078350d6054eddb531527296168c24a71cf2bc"} Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.235455 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fde5ff6f630fb2f142d05eff20078350d6054eddb531527296168c24a71cf2bc" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.235479 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.326285 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw"] Nov 24 13:48:40 crc kubenswrapper[5039]: E1124 13:48:40.326820 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ba28fd7-afae-4374-a4e4-3c0d969f8b96" containerName="extract-utilities" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.326888 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ba28fd7-afae-4374-a4e4-3c0d969f8b96" containerName="extract-utilities" Nov 24 13:48:40 crc kubenswrapper[5039]: E1124 13:48:40.326994 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a59c2a20-4a1f-4b68-aec2-5e2005f42418" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.327061 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="a59c2a20-4a1f-4b68-aec2-5e2005f42418" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 24 13:48:40 crc kubenswrapper[5039]: E1124 13:48:40.327132 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ba28fd7-afae-4374-a4e4-3c0d969f8b96" containerName="extract-content" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.327189 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ba28fd7-afae-4374-a4e4-3c0d969f8b96" containerName="extract-content" Nov 24 13:48:40 crc kubenswrapper[5039]: E1124 13:48:40.327255 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ba28fd7-afae-4374-a4e4-3c0d969f8b96" containerName="registry-server" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.327310 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ba28fd7-afae-4374-a4e4-3c0d969f8b96" containerName="registry-server" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.327610 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="a59c2a20-4a1f-4b68-aec2-5e2005f42418" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.327703 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ba28fd7-afae-4374-a4e4-3c0d969f8b96" containerName="registry-server" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.328436 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.330579 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.330615 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.331317 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.332011 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.342423 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw"] Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.406733 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93e04953-49f7-41fb-b2fb-514d4ed838e9-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw\" (UID: \"93e04953-49f7-41fb-b2fb-514d4ed838e9\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.406909 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxdd5\" (UniqueName: \"kubernetes.io/projected/93e04953-49f7-41fb-b2fb-514d4ed838e9-kube-api-access-kxdd5\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw\" (UID: \"93e04953-49f7-41fb-b2fb-514d4ed838e9\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.407059 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/93e04953-49f7-41fb-b2fb-514d4ed838e9-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw\" (UID: \"93e04953-49f7-41fb-b2fb-514d4ed838e9\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.508626 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/93e04953-49f7-41fb-b2fb-514d4ed838e9-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw\" (UID: \"93e04953-49f7-41fb-b2fb-514d4ed838e9\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.508719 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93e04953-49f7-41fb-b2fb-514d4ed838e9-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw\" (UID: \"93e04953-49f7-41fb-b2fb-514d4ed838e9\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.508852 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxdd5\" (UniqueName: \"kubernetes.io/projected/93e04953-49f7-41fb-b2fb-514d4ed838e9-kube-api-access-kxdd5\") 
pod \"configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw\" (UID: \"93e04953-49f7-41fb-b2fb-514d4ed838e9\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.512616 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/93e04953-49f7-41fb-b2fb-514d4ed838e9-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw\" (UID: \"93e04953-49f7-41fb-b2fb-514d4ed838e9\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.513337 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93e04953-49f7-41fb-b2fb-514d4ed838e9-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw\" (UID: \"93e04953-49f7-41fb-b2fb-514d4ed838e9\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.527459 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxdd5\" (UniqueName: \"kubernetes.io/projected/93e04953-49f7-41fb-b2fb-514d4ed838e9-kube-api-access-kxdd5\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw\" (UID: \"93e04953-49f7-41fb-b2fb-514d4ed838e9\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw" Nov 24 13:48:40 crc kubenswrapper[5039]: I1124 13:48:40.662148 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw" Nov 24 13:48:41 crc kubenswrapper[5039]: I1124 13:48:41.207103 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw"] Nov 24 13:48:41 crc kubenswrapper[5039]: I1124 13:48:41.219673 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 13:48:41 crc kubenswrapper[5039]: I1124 13:48:41.252031 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw" event={"ID":"93e04953-49f7-41fb-b2fb-514d4ed838e9","Type":"ContainerStarted","Data":"29c95a3f5040db0d830fa7b224340a0879a7a7721752767db20ff6e37418e579"} Nov 24 13:48:42 crc kubenswrapper[5039]: I1124 13:48:42.263606 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw" event={"ID":"93e04953-49f7-41fb-b2fb-514d4ed838e9","Type":"ContainerStarted","Data":"424d1a4d90c8dd5fcca1fcad01e4dcdac0f5a3dafc34fa5191ff12761c71a089"} Nov 24 13:48:42 crc kubenswrapper[5039]: I1124 13:48:42.283049 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw" podStartSLOduration=1.856863945 podStartE2EDuration="2.28303037s" podCreationTimestamp="2025-11-24 13:48:40 +0000 UTC" firstStartedPulling="2025-11-24 13:48:41.219451244 +0000 UTC m=+1833.658575744" lastFinishedPulling="2025-11-24 13:48:41.645617669 +0000 UTC m=+1834.084742169" observedRunningTime="2025-11-24 13:48:42.282303472 +0000 UTC m=+1834.721427982" watchObservedRunningTime="2025-11-24 13:48:42.28303037 +0000 UTC m=+1834.722154880" Nov 24 13:48:47 crc kubenswrapper[5039]: I1124 13:48:47.426377 5039 scope.go:117] "RemoveContainer" 
containerID="807e0bfefe975cd3524f0f960ea593d92b4f19f497a08465800071116473344b" Nov 24 13:49:25 crc kubenswrapper[5039]: I1124 13:49:25.042174 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-8xdmb"] Nov 24 13:49:25 crc kubenswrapper[5039]: I1124 13:49:25.055175 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-n9c94"] Nov 24 13:49:25 crc kubenswrapper[5039]: I1124 13:49:25.066571 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-683c-account-create-cddj7"] Nov 24 13:49:25 crc kubenswrapper[5039]: I1124 13:49:25.102740 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-26ef-account-create-jtkcn"] Nov 24 13:49:25 crc kubenswrapper[5039]: I1124 13:49:25.114070 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-683c-account-create-cddj7"] Nov 24 13:49:25 crc kubenswrapper[5039]: I1124 13:49:25.120736 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-26ef-account-create-jtkcn"] Nov 24 13:49:25 crc kubenswrapper[5039]: I1124 13:49:25.128921 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-8xdmb"] Nov 24 13:49:25 crc kubenswrapper[5039]: I1124 13:49:25.137083 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-n9c94"] Nov 24 13:49:26 crc kubenswrapper[5039]: I1124 13:49:26.035126 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-l5gbh"] Nov 24 13:49:26 crc kubenswrapper[5039]: I1124 13:49:26.046261 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-l5gbh"] Nov 24 13:49:26 crc kubenswrapper[5039]: I1124 13:49:26.054949 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-ddce-account-create-s8b2c"] Nov 24 13:49:26 crc kubenswrapper[5039]: I1124 13:49:26.063371 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-ddce-account-create-s8b2c"] Nov 24 13:49:26 crc kubenswrapper[5039]: I1124 13:49:26.322689 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39bc167d-fd5d-4522-8350-8a59cd32aced" path="/var/lib/kubelet/pods/39bc167d-fd5d-4522-8350-8a59cd32aced/volumes" Nov 24 13:49:26 crc kubenswrapper[5039]: I1124 13:49:26.323491 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d87b7ee-b5fa-442b-bf05-eaf35c945ca4" path="/var/lib/kubelet/pods/3d87b7ee-b5fa-442b-bf05-eaf35c945ca4/volumes" Nov 24 13:49:26 crc kubenswrapper[5039]: I1124 13:49:26.324143 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c87e221f-b63d-4883-a80b-084a56305cb1" path="/var/lib/kubelet/pods/c87e221f-b63d-4883-a80b-084a56305cb1/volumes" Nov 24 13:49:26 crc kubenswrapper[5039]: I1124 13:49:26.324767 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd84bf91-50f0-43ff-a40d-7973e2e54a0b" path="/var/lib/kubelet/pods/cd84bf91-50f0-43ff-a40d-7973e2e54a0b/volumes" Nov 24 13:49:26 crc kubenswrapper[5039]: I1124 13:49:26.325899 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cfcfc56f-8643-4195-8d18-b3076deac9d4" path="/var/lib/kubelet/pods/cfcfc56f-8643-4195-8d18-b3076deac9d4/volumes" Nov 24 13:49:26 crc kubenswrapper[5039]: I1124 13:49:26.326493 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8721aea-88f2-4436-9334-b3b85b3b08ed" path="/var/lib/kubelet/pods/d8721aea-88f2-4436-9334-b3b85b3b08ed/volumes" 
Nov 24 13:49:28 crc kubenswrapper[5039]: I1124 13:49:28.028485 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-7c1f-account-create-hnc4b"] Nov 24 13:49:28 crc kubenswrapper[5039]: I1124 13:49:28.038911 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-d56m7"] Nov 24 13:49:28 crc kubenswrapper[5039]: I1124 13:49:28.049679 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-d56m7"] Nov 24 13:49:28 crc kubenswrapper[5039]: I1124 13:49:28.060438 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-7c1f-account-create-hnc4b"] Nov 24 13:49:28 crc kubenswrapper[5039]: I1124 13:49:28.345168 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="236b7551-4b7b-4643-afe6-0bb78c880b3b" path="/var/lib/kubelet/pods/236b7551-4b7b-4643-afe6-0bb78c880b3b/volumes" Nov 24 13:49:28 crc kubenswrapper[5039]: I1124 13:49:28.346563 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1c38b9f-b596-43f3-83d4-9450c71fbaa6" path="/var/lib/kubelet/pods/c1c38b9f-b596-43f3-83d4-9450c71fbaa6/volumes" Nov 24 13:49:33 crc kubenswrapper[5039]: I1124 13:49:33.036788 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-mdrbd"] Nov 24 13:49:33 crc kubenswrapper[5039]: I1124 13:49:33.053943 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-mdrbd"] Nov 24 13:49:34 crc kubenswrapper[5039]: I1124 13:49:34.041458 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-d1ff-account-create-xsvvf"] Nov 24 13:49:34 crc kubenswrapper[5039]: I1124 13:49:34.053306 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-f9m74"] Nov 24 13:49:34 crc kubenswrapper[5039]: I1124 13:49:34.065467 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-8c2b-account-create-dcfff"] Nov 24 13:49:34 crc kubenswrapper[5039]: I1124 13:49:34.077725 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-f9m74"] Nov 24 13:49:34 crc kubenswrapper[5039]: I1124 13:49:34.086850 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-8c2b-account-create-dcfff"] Nov 24 13:49:34 crc kubenswrapper[5039]: I1124 13:49:34.096333 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-d1ff-account-create-xsvvf"] Nov 24 13:49:34 crc kubenswrapper[5039]: I1124 13:49:34.321375 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="739c6211-e4b4-4386-9d63-c9b680eb9114" path="/var/lib/kubelet/pods/739c6211-e4b4-4386-9d63-c9b680eb9114/volumes" Nov 24 13:49:34 crc kubenswrapper[5039]: I1124 13:49:34.322316 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b861c52a-7fd3-4027-931a-624b4149e21b" path="/var/lib/kubelet/pods/b861c52a-7fd3-4027-931a-624b4149e21b/volumes" Nov 24 13:49:34 crc kubenswrapper[5039]: I1124 13:49:34.323039 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d373e37d-246d-4b90-863b-b224a059c4e1" path="/var/lib/kubelet/pods/d373e37d-246d-4b90-863b-b224a059c4e1/volumes" Nov 24 13:49:34 crc kubenswrapper[5039]: I1124 13:49:34.323934 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f09296a1-0a30-4fb4-ba9f-c4744066800b" path="/var/lib/kubelet/pods/f09296a1-0a30-4fb4-ba9f-c4744066800b/volumes" 
Nov 24 13:49:37 crc kubenswrapper[5039]: I1124 13:49:37.067642 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-8a8a-account-create-w645s"] Nov 24 13:49:37 crc kubenswrapper[5039]: I1124 13:49:37.084699 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-h8pnd"] Nov 24 13:49:37 crc kubenswrapper[5039]: I1124 13:49:37.092807 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-9c10-account-create-h7m7w"] Nov 24 13:49:37 crc kubenswrapper[5039]: I1124 13:49:37.101275 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-8hph9"] Nov 24 13:49:37 crc kubenswrapper[5039]: I1124 13:49:37.112819 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-8a8a-account-create-w645s"] Nov 24 13:49:37 crc kubenswrapper[5039]: I1124 13:49:37.123478 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-h8pnd"] Nov 24 13:49:37 crc kubenswrapper[5039]: I1124 13:49:37.133182 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-8hph9"] Nov 24 13:49:37 crc kubenswrapper[5039]: I1124 13:49:37.144529 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-9c10-account-create-h7m7w"] Nov 24 13:49:37 crc kubenswrapper[5039]: I1124 13:49:37.157123 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-bdknn"] Nov 24 13:49:37 crc kubenswrapper[5039]: I1124 13:49:37.167151 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-35c8-account-create-5lwlj"] Nov 24 13:49:37 crc kubenswrapper[5039]: I1124 13:49:37.176964 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-35c8-account-create-5lwlj"] Nov 24 13:49:37 crc kubenswrapper[5039]: I1124 13:49:37.185183 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-bdknn"] Nov 24 13:49:38 crc kubenswrapper[5039]: I1124 13:49:38.317217 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0016dd31-1097-438e-9197-bd3f5c9659d3" path="/var/lib/kubelet/pods/0016dd31-1097-438e-9197-bd3f5c9659d3/volumes" Nov 24 13:49:38 crc kubenswrapper[5039]: I1124 13:49:38.317824 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40c8f747-a116-404e-af9e-85f85a759bed" path="/var/lib/kubelet/pods/40c8f747-a116-404e-af9e-85f85a759bed/volumes" Nov 24 13:49:38 crc kubenswrapper[5039]: I1124 13:49:38.318335 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91ea1a14-92d9-4a15-9cb7-accdb57351b0" path="/var/lib/kubelet/pods/91ea1a14-92d9-4a15-9cb7-accdb57351b0/volumes" Nov 24 13:49:38 crc kubenswrapper[5039]: I1124 13:49:38.318887 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d103981-92d4-4a79-a8e7-cf9f82c8135a" path="/var/lib/kubelet/pods/9d103981-92d4-4a79-a8e7-cf9f82c8135a/volumes" Nov 24 13:49:38 crc kubenswrapper[5039]: I1124 13:49:38.319879 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c56ef76f-5741-4430-8973-4c035fc82525" path="/var/lib/kubelet/pods/c56ef76f-5741-4430-8973-4c035fc82525/volumes" Nov 24 13:49:38 crc kubenswrapper[5039]: I1124 13:49:38.320374 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3ad43f7-b71c-4cd6-ad76-93f881fe820d" path="/var/lib/kubelet/pods/f3ad43f7-b71c-4cd6-ad76-93f881fe820d/volumes" Nov 24 13:49:42 crc kubenswrapper[5039]: I1124 13:49:42.025428 5039 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-b4tnp"] Nov 24 13:49:42 crc kubenswrapper[5039]: I1124 13:49:42.034072 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-b4tnp"] Nov 24 13:49:42 crc kubenswrapper[5039]: I1124 13:49:42.320371 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2dbb3128-e4f7-4ff1-bc59-7873deed9a52" path="/var/lib/kubelet/pods/2dbb3128-e4f7-4ff1-bc59-7873deed9a52/volumes" Nov 24 13:49:47 crc kubenswrapper[5039]: I1124 13:49:47.522736 5039 scope.go:117] "RemoveContainer" containerID="d63debaf15a8e0d7912888d168410b64e2023f34b3992cb476eb43d389a4f94b" Nov 24 13:49:47 crc kubenswrapper[5039]: I1124 13:49:47.557504 5039 scope.go:117] "RemoveContainer" containerID="71689e8cb95ceeba081d1afab87813324767d13b01edf7929255125eb22fbdfc" Nov 24 13:49:47 crc kubenswrapper[5039]: I1124 13:49:47.603577 5039 scope.go:117] "RemoveContainer" containerID="0f123540ba7e5b3239817353478fbbac74f3b88ae4c9c89f0e6cb242756aa778" Nov 24 13:49:47 crc kubenswrapper[5039]: I1124 13:49:47.647709 5039 scope.go:117] "RemoveContainer" containerID="084452c6f5566811db74a3e646de966ab89f1bb00652cc545c4f9ef039dd9133" Nov 24 13:49:47 crc kubenswrapper[5039]: I1124 13:49:47.715796 5039 scope.go:117] "RemoveContainer" containerID="146618cf69f2a66b90fb82a7947127cfa95138536f81540fbfd428fd1f89afe5" Nov 24 13:49:47 crc kubenswrapper[5039]: I1124 13:49:47.777340 5039 scope.go:117] "RemoveContainer" containerID="6757bdea0fa369f6e9dcb78a19d029d0785ce801d1b7126895435b166de0baa7" Nov 24 13:49:47 crc kubenswrapper[5039]: I1124 13:49:47.849814 5039 scope.go:117] "RemoveContainer" containerID="b7b0e5e7951e46e5e5c37704f1abad05814d54068f9dc0e0be5d882c44a4a3c6" Nov 24 13:49:47 crc kubenswrapper[5039]: I1124 13:49:47.881177 5039 scope.go:117] "RemoveContainer" containerID="59fc9c31e57979785ff13a5825b4efd00dc42bb056e619cac78ae9fb73a6a149" Nov 24 13:49:47 crc kubenswrapper[5039]: I1124 13:49:47.910170 5039 scope.go:117] "RemoveContainer" containerID="e9aaa5251be8182c8baa2279431d7aa214fd17781ac0bcd3b738343f9f2bfa84" Nov 24 13:49:47 crc kubenswrapper[5039]: I1124 13:49:47.935766 5039 scope.go:117] "RemoveContainer" containerID="6929ed7d7ea24a1330c1fba516d8289152ccdde5a5b8fb164f38c1790497a4e0" Nov 24 13:49:47 crc kubenswrapper[5039]: I1124 13:49:47.974628 5039 scope.go:117] "RemoveContainer" containerID="7c65004452f1bcc11272cc1df757a0ece2c12a06cee1351b5caaad4a4e22b0de" Nov 24 13:49:48 crc kubenswrapper[5039]: I1124 13:49:48.001388 5039 scope.go:117] "RemoveContainer" containerID="8950ed724b546f92b7d71867b6ab3f8f57d0fd14baa0fedbe4f0a78589638084" Nov 24 13:49:48 crc kubenswrapper[5039]: I1124 13:49:48.029255 5039 scope.go:117] "RemoveContainer" containerID="3652015e828e32069b619566e1c0a8c8a0ec3380ac56af19d81a06514dd12790" Nov 24 13:49:48 crc kubenswrapper[5039]: I1124 13:49:48.121977 5039 scope.go:117] "RemoveContainer" containerID="0d8a1dd774dcd5386d094c01b4701a251605276874203e854d307191eadcb8c2" Nov 24 13:49:48 crc kubenswrapper[5039]: I1124 13:49:48.168457 5039 scope.go:117] "RemoveContainer" containerID="b0180965e15e5c38177a1db790e1068a187ffef0f2295e39adc2b759b4f864fd" Nov 24 13:49:48 crc kubenswrapper[5039]: I1124 13:49:48.198212 5039 scope.go:117] "RemoveContainer" containerID="d94b324e68efb784416ce91f44a095ea45e3ffb2eae1cd0ad9c58fe73d2887b3" Nov 24 13:49:48 crc kubenswrapper[5039]: I1124 13:49:48.237074 5039 scope.go:117] "RemoveContainer" containerID="0a224cfdf0893ddc4474174e70a8ac02ee1fe608afca2f72c6590de8442bbab5" Nov 24 
13:49:48 crc kubenswrapper[5039]: I1124 13:49:48.274995 5039 scope.go:117] "RemoveContainer" containerID="b3bd42281f82b7f052bc504a1137f9d97fe06b4bcffada9efd517907c7badbfb" Nov 24 13:49:48 crc kubenswrapper[5039]: I1124 13:49:48.300915 5039 scope.go:117] "RemoveContainer" containerID="c2f0914e2a0324a4f9c803881825b288e8808496d77055ec30366e976583b6b5" Nov 24 13:49:48 crc kubenswrapper[5039]: I1124 13:49:48.375813 5039 generic.go:334] "Generic (PLEG): container finished" podID="93e04953-49f7-41fb-b2fb-514d4ed838e9" containerID="424d1a4d90c8dd5fcca1fcad01e4dcdac0f5a3dafc34fa5191ff12761c71a089" exitCode=0 Nov 24 13:49:48 crc kubenswrapper[5039]: I1124 13:49:48.375882 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw" event={"ID":"93e04953-49f7-41fb-b2fb-514d4ed838e9","Type":"ContainerDied","Data":"424d1a4d90c8dd5fcca1fcad01e4dcdac0f5a3dafc34fa5191ff12761c71a089"} Nov 24 13:49:49 crc kubenswrapper[5039]: I1124 13:49:49.876539 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.005104 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/93e04953-49f7-41fb-b2fb-514d4ed838e9-ssh-key\") pod \"93e04953-49f7-41fb-b2fb-514d4ed838e9\" (UID: \"93e04953-49f7-41fb-b2fb-514d4ed838e9\") " Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.005571 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kxdd5\" (UniqueName: \"kubernetes.io/projected/93e04953-49f7-41fb-b2fb-514d4ed838e9-kube-api-access-kxdd5\") pod \"93e04953-49f7-41fb-b2fb-514d4ed838e9\" (UID: \"93e04953-49f7-41fb-b2fb-514d4ed838e9\") " Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.005806 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93e04953-49f7-41fb-b2fb-514d4ed838e9-inventory\") pod \"93e04953-49f7-41fb-b2fb-514d4ed838e9\" (UID: \"93e04953-49f7-41fb-b2fb-514d4ed838e9\") " Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.011109 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93e04953-49f7-41fb-b2fb-514d4ed838e9-kube-api-access-kxdd5" (OuterVolumeSpecName: "kube-api-access-kxdd5") pod "93e04953-49f7-41fb-b2fb-514d4ed838e9" (UID: "93e04953-49f7-41fb-b2fb-514d4ed838e9"). InnerVolumeSpecName "kube-api-access-kxdd5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.034396 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93e04953-49f7-41fb-b2fb-514d4ed838e9-inventory" (OuterVolumeSpecName: "inventory") pod "93e04953-49f7-41fb-b2fb-514d4ed838e9" (UID: "93e04953-49f7-41fb-b2fb-514d4ed838e9"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.039406 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93e04953-49f7-41fb-b2fb-514d4ed838e9-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "93e04953-49f7-41fb-b2fb-514d4ed838e9" (UID: "93e04953-49f7-41fb-b2fb-514d4ed838e9"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.109755 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93e04953-49f7-41fb-b2fb-514d4ed838e9-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.109824 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/93e04953-49f7-41fb-b2fb-514d4ed838e9-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.109843 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kxdd5\" (UniqueName: \"kubernetes.io/projected/93e04953-49f7-41fb-b2fb-514d4ed838e9-kube-api-access-kxdd5\") on node \"crc\" DevicePath \"\"" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.444060 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw" event={"ID":"93e04953-49f7-41fb-b2fb-514d4ed838e9","Type":"ContainerDied","Data":"29c95a3f5040db0d830fa7b224340a0879a7a7721752767db20ff6e37418e579"} Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.444122 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="29c95a3f5040db0d830fa7b224340a0879a7a7721752767db20ff6e37418e579" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.444157 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.519858 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j"] Nov 24 13:49:50 crc kubenswrapper[5039]: E1124 13:49:50.520372 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93e04953-49f7-41fb-b2fb-514d4ed838e9" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.520399 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="93e04953-49f7-41fb-b2fb-514d4ed838e9" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.520705 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="93e04953-49f7-41fb-b2fb-514d4ed838e9" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.521673 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.524841 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.524986 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.525232 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.525951 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.540930 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j"] Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.619933 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-vf74j\" (UID: \"4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.620353 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2gxb\" (UniqueName: \"kubernetes.io/projected/4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2-kube-api-access-z2gxb\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-vf74j\" (UID: \"4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.620619 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-vf74j\" (UID: \"4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.722433 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-vf74j\" (UID: \"4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.722615 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2gxb\" (UniqueName: \"kubernetes.io/projected/4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2-kube-api-access-z2gxb\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-vf74j\" (UID: \"4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.722709 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2-ssh-key\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-vf74j\" (UID: \"4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.726695 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-vf74j\" (UID: \"4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.731243 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-vf74j\" (UID: \"4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.753438 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2gxb\" (UniqueName: \"kubernetes.io/projected/4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2-kube-api-access-z2gxb\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-vf74j\" (UID: \"4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j" Nov 24 13:49:50 crc kubenswrapper[5039]: I1124 13:49:50.841010 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j" Nov 24 13:49:51 crc kubenswrapper[5039]: I1124 13:49:51.491141 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j"] Nov 24 13:49:52 crc kubenswrapper[5039]: I1124 13:49:52.464328 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j" event={"ID":"4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2","Type":"ContainerStarted","Data":"0abd66b830377b3f095bef5412d2f6535c67f0a5ef4884b951e0bc53779ebaa3"} Nov 24 13:49:52 crc kubenswrapper[5039]: I1124 13:49:52.464666 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j" event={"ID":"4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2","Type":"ContainerStarted","Data":"49f36e356cc7381e5685d5538c6a05ada909179209b574720e26b4c82b578af2"} Nov 24 13:49:52 crc kubenswrapper[5039]: I1124 13:49:52.489875 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j" podStartSLOduration=1.980289994 podStartE2EDuration="2.489853834s" podCreationTimestamp="2025-11-24 13:49:50 +0000 UTC" firstStartedPulling="2025-11-24 13:49:51.487519784 +0000 UTC m=+1903.926644294" lastFinishedPulling="2025-11-24 13:49:51.997083634 +0000 UTC m=+1904.436208134" observedRunningTime="2025-11-24 13:49:52.480056255 +0000 UTC m=+1904.919180755" watchObservedRunningTime="2025-11-24 13:49:52.489853834 +0000 UTC m=+1904.928978334" Nov 24 13:49:57 crc kubenswrapper[5039]: I1124 13:49:57.521288 5039 generic.go:334] "Generic (PLEG): container finished" podID="4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2" containerID="0abd66b830377b3f095bef5412d2f6535c67f0a5ef4884b951e0bc53779ebaa3" exitCode=0 Nov 24 13:49:57 crc kubenswrapper[5039]: I1124 
13:49:57.521415 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j" event={"ID":"4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2","Type":"ContainerDied","Data":"0abd66b830377b3f095bef5412d2f6535c67f0a5ef4884b951e0bc53779ebaa3"} Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.008585 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.116014 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2gxb\" (UniqueName: \"kubernetes.io/projected/4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2-kube-api-access-z2gxb\") pod \"4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2\" (UID: \"4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2\") " Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.116273 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2-inventory\") pod \"4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2\" (UID: \"4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2\") " Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.116382 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2-ssh-key\") pod \"4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2\" (UID: \"4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2\") " Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.125862 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2-kube-api-access-z2gxb" (OuterVolumeSpecName: "kube-api-access-z2gxb") pod "4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2" (UID: "4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2"). InnerVolumeSpecName "kube-api-access-z2gxb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.147187 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2-inventory" (OuterVolumeSpecName: "inventory") pod "4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2" (UID: "4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.151368 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2" (UID: "4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.218967 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.219001 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2gxb\" (UniqueName: \"kubernetes.io/projected/4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2-kube-api-access-z2gxb\") on node \"crc\" DevicePath \"\"" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.219010 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.545896 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j" event={"ID":"4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2","Type":"ContainerDied","Data":"49f36e356cc7381e5685d5538c6a05ada909179209b574720e26b4c82b578af2"} Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.545953 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="49f36e356cc7381e5685d5538c6a05ada909179209b574720e26b4c82b578af2" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.545991 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.627942 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z"] Nov 24 13:49:59 crc kubenswrapper[5039]: E1124 13:49:59.628454 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.628472 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.628730 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.629469 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.632260 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.632579 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.632759 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.632957 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.644202 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z"] Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.729844 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c01a55af-b9b1-4958-b31e-b7b47527b7e2-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lh74z\" (UID: \"c01a55af-b9b1-4958-b31e-b7b47527b7e2\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.729978 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c01a55af-b9b1-4958-b31e-b7b47527b7e2-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lh74z\" (UID: \"c01a55af-b9b1-4958-b31e-b7b47527b7e2\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.730100 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5qht\" (UniqueName: \"kubernetes.io/projected/c01a55af-b9b1-4958-b31e-b7b47527b7e2-kube-api-access-m5qht\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lh74z\" (UID: \"c01a55af-b9b1-4958-b31e-b7b47527b7e2\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.832188 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5qht\" (UniqueName: \"kubernetes.io/projected/c01a55af-b9b1-4958-b31e-b7b47527b7e2-kube-api-access-m5qht\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lh74z\" (UID: \"c01a55af-b9b1-4958-b31e-b7b47527b7e2\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.832359 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c01a55af-b9b1-4958-b31e-b7b47527b7e2-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lh74z\" (UID: \"c01a55af-b9b1-4958-b31e-b7b47527b7e2\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.832431 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c01a55af-b9b1-4958-b31e-b7b47527b7e2-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lh74z\" (UID: 
\"c01a55af-b9b1-4958-b31e-b7b47527b7e2\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.836483 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c01a55af-b9b1-4958-b31e-b7b47527b7e2-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lh74z\" (UID: \"c01a55af-b9b1-4958-b31e-b7b47527b7e2\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.842821 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c01a55af-b9b1-4958-b31e-b7b47527b7e2-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lh74z\" (UID: \"c01a55af-b9b1-4958-b31e-b7b47527b7e2\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.852106 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5qht\" (UniqueName: \"kubernetes.io/projected/c01a55af-b9b1-4958-b31e-b7b47527b7e2-kube-api-access-m5qht\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lh74z\" (UID: \"c01a55af-b9b1-4958-b31e-b7b47527b7e2\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z" Nov 24 13:49:59 crc kubenswrapper[5039]: I1124 13:49:59.958243 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z" Nov 24 13:50:00 crc kubenswrapper[5039]: I1124 13:50:00.521188 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z"] Nov 24 13:50:00 crc kubenswrapper[5039]: I1124 13:50:00.557052 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z" event={"ID":"c01a55af-b9b1-4958-b31e-b7b47527b7e2","Type":"ContainerStarted","Data":"0cc7d41fd0294e6ceec77646a93f96bf47f3e86b759782416d40fc03db04ada6"} Nov 24 13:50:01 crc kubenswrapper[5039]: I1124 13:50:01.569492 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z" event={"ID":"c01a55af-b9b1-4958-b31e-b7b47527b7e2","Type":"ContainerStarted","Data":"b63a3e6fa61c5c7303c139a7343c1ae2f293f8749843c9c11a84343e7e4ce862"} Nov 24 13:50:01 crc kubenswrapper[5039]: I1124 13:50:01.590403 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z" podStartSLOduration=2.144810977 podStartE2EDuration="2.590382306s" podCreationTimestamp="2025-11-24 13:49:59 +0000 UTC" firstStartedPulling="2025-11-24 13:50:00.520984188 +0000 UTC m=+1912.960108688" lastFinishedPulling="2025-11-24 13:50:00.966555517 +0000 UTC m=+1913.405680017" observedRunningTime="2025-11-24 13:50:01.583751993 +0000 UTC m=+1914.022876493" watchObservedRunningTime="2025-11-24 13:50:01.590382306 +0000 UTC m=+1914.029506806" Nov 24 13:50:38 crc kubenswrapper[5039]: I1124 13:50:38.049166 5039 generic.go:334] "Generic (PLEG): container finished" podID="c01a55af-b9b1-4958-b31e-b7b47527b7e2" containerID="b63a3e6fa61c5c7303c139a7343c1ae2f293f8749843c9c11a84343e7e4ce862" exitCode=0 Nov 24 13:50:38 crc kubenswrapper[5039]: I1124 13:50:38.049281 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z" 
event={"ID":"c01a55af-b9b1-4958-b31e-b7b47527b7e2","Type":"ContainerDied","Data":"b63a3e6fa61c5c7303c139a7343c1ae2f293f8749843c9c11a84343e7e4ce862"} Nov 24 13:50:39 crc kubenswrapper[5039]: I1124 13:50:39.549972 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z" Nov 24 13:50:39 crc kubenswrapper[5039]: I1124 13:50:39.569200 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c01a55af-b9b1-4958-b31e-b7b47527b7e2-inventory\") pod \"c01a55af-b9b1-4958-b31e-b7b47527b7e2\" (UID: \"c01a55af-b9b1-4958-b31e-b7b47527b7e2\") " Nov 24 13:50:39 crc kubenswrapper[5039]: I1124 13:50:39.569566 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5qht\" (UniqueName: \"kubernetes.io/projected/c01a55af-b9b1-4958-b31e-b7b47527b7e2-kube-api-access-m5qht\") pod \"c01a55af-b9b1-4958-b31e-b7b47527b7e2\" (UID: \"c01a55af-b9b1-4958-b31e-b7b47527b7e2\") " Nov 24 13:50:39 crc kubenswrapper[5039]: I1124 13:50:39.569755 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c01a55af-b9b1-4958-b31e-b7b47527b7e2-ssh-key\") pod \"c01a55af-b9b1-4958-b31e-b7b47527b7e2\" (UID: \"c01a55af-b9b1-4958-b31e-b7b47527b7e2\") " Nov 24 13:50:39 crc kubenswrapper[5039]: I1124 13:50:39.580956 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c01a55af-b9b1-4958-b31e-b7b47527b7e2-kube-api-access-m5qht" (OuterVolumeSpecName: "kube-api-access-m5qht") pod "c01a55af-b9b1-4958-b31e-b7b47527b7e2" (UID: "c01a55af-b9b1-4958-b31e-b7b47527b7e2"). InnerVolumeSpecName "kube-api-access-m5qht". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:50:39 crc kubenswrapper[5039]: I1124 13:50:39.612351 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c01a55af-b9b1-4958-b31e-b7b47527b7e2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c01a55af-b9b1-4958-b31e-b7b47527b7e2" (UID: "c01a55af-b9b1-4958-b31e-b7b47527b7e2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:50:39 crc kubenswrapper[5039]: I1124 13:50:39.613293 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c01a55af-b9b1-4958-b31e-b7b47527b7e2-inventory" (OuterVolumeSpecName: "inventory") pod "c01a55af-b9b1-4958-b31e-b7b47527b7e2" (UID: "c01a55af-b9b1-4958-b31e-b7b47527b7e2"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:50:39 crc kubenswrapper[5039]: I1124 13:50:39.673298 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c01a55af-b9b1-4958-b31e-b7b47527b7e2-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 13:50:39 crc kubenswrapper[5039]: I1124 13:50:39.673580 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c01a55af-b9b1-4958-b31e-b7b47527b7e2-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 13:50:39 crc kubenswrapper[5039]: I1124 13:50:39.673672 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m5qht\" (UniqueName: \"kubernetes.io/projected/c01a55af-b9b1-4958-b31e-b7b47527b7e2-kube-api-access-m5qht\") on node \"crc\" DevicePath \"\"" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.047638 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-dvmqw"] Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.058859 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-hmshf"] Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.071410 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-dvmqw"] Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.075218 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z" event={"ID":"c01a55af-b9b1-4958-b31e-b7b47527b7e2","Type":"ContainerDied","Data":"0cc7d41fd0294e6ceec77646a93f96bf47f3e86b759782416d40fc03db04ada6"} Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.075268 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0cc7d41fd0294e6ceec77646a93f96bf47f3e86b759782416d40fc03db04ada6" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.075309 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.081961 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-hmshf"] Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.152385 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v"] Nov 24 13:50:40 crc kubenswrapper[5039]: E1124 13:50:40.153027 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c01a55af-b9b1-4958-b31e-b7b47527b7e2" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.153051 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="c01a55af-b9b1-4958-b31e-b7b47527b7e2" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.153291 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="c01a55af-b9b1-4958-b31e-b7b47527b7e2" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.154319 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.156886 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.157151 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.157315 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.157569 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.175779 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v"] Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.183037 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dhbd\" (UniqueName: \"kubernetes.io/projected/15cc66e4-c047-4ab8-b10d-9e54fd7ef393-kube-api-access-8dhbd\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v\" (UID: \"15cc66e4-c047-4ab8-b10d-9e54fd7ef393\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.183141 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/15cc66e4-c047-4ab8-b10d-9e54fd7ef393-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v\" (UID: \"15cc66e4-c047-4ab8-b10d-9e54fd7ef393\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.183172 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/15cc66e4-c047-4ab8-b10d-9e54fd7ef393-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v\" (UID: \"15cc66e4-c047-4ab8-b10d-9e54fd7ef393\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.285210 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dhbd\" (UniqueName: \"kubernetes.io/projected/15cc66e4-c047-4ab8-b10d-9e54fd7ef393-kube-api-access-8dhbd\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v\" (UID: \"15cc66e4-c047-4ab8-b10d-9e54fd7ef393\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.285327 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/15cc66e4-c047-4ab8-b10d-9e54fd7ef393-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v\" (UID: \"15cc66e4-c047-4ab8-b10d-9e54fd7ef393\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.285368 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/15cc66e4-c047-4ab8-b10d-9e54fd7ef393-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v\" 
(UID: \"15cc66e4-c047-4ab8-b10d-9e54fd7ef393\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.290831 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/15cc66e4-c047-4ab8-b10d-9e54fd7ef393-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v\" (UID: \"15cc66e4-c047-4ab8-b10d-9e54fd7ef393\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.293529 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/15cc66e4-c047-4ab8-b10d-9e54fd7ef393-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v\" (UID: \"15cc66e4-c047-4ab8-b10d-9e54fd7ef393\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.302150 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dhbd\" (UniqueName: \"kubernetes.io/projected/15cc66e4-c047-4ab8-b10d-9e54fd7ef393-kube-api-access-8dhbd\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v\" (UID: \"15cc66e4-c047-4ab8-b10d-9e54fd7ef393\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.319819 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1577e002-5267-48f4-b292-158ebed8410c" path="/var/lib/kubelet/pods/1577e002-5267-48f4-b292-158ebed8410c/volumes" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.320373 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a1e2c3e-f294-40f6-9b98-5fd16034f145" path="/var/lib/kubelet/pods/6a1e2c3e-f294-40f6-9b98-5fd16034f145/volumes" Nov 24 13:50:40 crc kubenswrapper[5039]: I1124 13:50:40.476016 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v" Nov 24 13:50:41 crc kubenswrapper[5039]: I1124 13:50:41.026753 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v"] Nov 24 13:50:41 crc kubenswrapper[5039]: I1124 13:50:41.084112 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v" event={"ID":"15cc66e4-c047-4ab8-b10d-9e54fd7ef393","Type":"ContainerStarted","Data":"a4a3bb4d3763b86d4eb5598cf0bfbd496b80bf8229c2cd7bf7043c9f6e3856a7"} Nov 24 13:50:42 crc kubenswrapper[5039]: I1124 13:50:42.094067 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v" event={"ID":"15cc66e4-c047-4ab8-b10d-9e54fd7ef393","Type":"ContainerStarted","Data":"1731a314676a903005df934daf142b0ba23e9c1bd8e163fa60023b8d117193f2"} Nov 24 13:50:42 crc kubenswrapper[5039]: I1124 13:50:42.113075 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v" podStartSLOduration=1.474760797 podStartE2EDuration="2.113055288s" podCreationTimestamp="2025-11-24 13:50:40 +0000 UTC" firstStartedPulling="2025-11-24 13:50:41.029687625 +0000 UTC m=+1953.468812125" lastFinishedPulling="2025-11-24 13:50:41.667982106 +0000 UTC m=+1954.107106616" observedRunningTime="2025-11-24 13:50:42.109642805 +0000 UTC m=+1954.548767315" watchObservedRunningTime="2025-11-24 13:50:42.113055288 +0000 UTC m=+1954.552179788" Nov 24 13:50:46 crc kubenswrapper[5039]: I1124 13:50:46.144308 5039 generic.go:334] "Generic (PLEG): container finished" podID="15cc66e4-c047-4ab8-b10d-9e54fd7ef393" containerID="1731a314676a903005df934daf142b0ba23e9c1bd8e163fa60023b8d117193f2" exitCode=0 Nov 24 13:50:46 crc kubenswrapper[5039]: I1124 13:50:46.144432 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v" event={"ID":"15cc66e4-c047-4ab8-b10d-9e54fd7ef393","Type":"ContainerDied","Data":"1731a314676a903005df934daf142b0ba23e9c1bd8e163fa60023b8d117193f2"} Nov 24 13:50:47 crc kubenswrapper[5039]: I1124 13:50:47.607356 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v" Nov 24 13:50:47 crc kubenswrapper[5039]: I1124 13:50:47.664194 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/15cc66e4-c047-4ab8-b10d-9e54fd7ef393-ssh-key\") pod \"15cc66e4-c047-4ab8-b10d-9e54fd7ef393\" (UID: \"15cc66e4-c047-4ab8-b10d-9e54fd7ef393\") " Nov 24 13:50:47 crc kubenswrapper[5039]: I1124 13:50:47.664284 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/15cc66e4-c047-4ab8-b10d-9e54fd7ef393-inventory\") pod \"15cc66e4-c047-4ab8-b10d-9e54fd7ef393\" (UID: \"15cc66e4-c047-4ab8-b10d-9e54fd7ef393\") " Nov 24 13:50:47 crc kubenswrapper[5039]: I1124 13:50:47.664345 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8dhbd\" (UniqueName: \"kubernetes.io/projected/15cc66e4-c047-4ab8-b10d-9e54fd7ef393-kube-api-access-8dhbd\") pod \"15cc66e4-c047-4ab8-b10d-9e54fd7ef393\" (UID: \"15cc66e4-c047-4ab8-b10d-9e54fd7ef393\") " Nov 24 13:50:47 crc kubenswrapper[5039]: I1124 13:50:47.669398 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15cc66e4-c047-4ab8-b10d-9e54fd7ef393-kube-api-access-8dhbd" (OuterVolumeSpecName: "kube-api-access-8dhbd") pod "15cc66e4-c047-4ab8-b10d-9e54fd7ef393" (UID: "15cc66e4-c047-4ab8-b10d-9e54fd7ef393"). InnerVolumeSpecName "kube-api-access-8dhbd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:50:47 crc kubenswrapper[5039]: I1124 13:50:47.696757 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15cc66e4-c047-4ab8-b10d-9e54fd7ef393-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "15cc66e4-c047-4ab8-b10d-9e54fd7ef393" (UID: "15cc66e4-c047-4ab8-b10d-9e54fd7ef393"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:50:47 crc kubenswrapper[5039]: I1124 13:50:47.699021 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15cc66e4-c047-4ab8-b10d-9e54fd7ef393-inventory" (OuterVolumeSpecName: "inventory") pod "15cc66e4-c047-4ab8-b10d-9e54fd7ef393" (UID: "15cc66e4-c047-4ab8-b10d-9e54fd7ef393"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:50:47 crc kubenswrapper[5039]: I1124 13:50:47.768257 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/15cc66e4-c047-4ab8-b10d-9e54fd7ef393-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 13:50:47 crc kubenswrapper[5039]: I1124 13:50:47.768319 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/15cc66e4-c047-4ab8-b10d-9e54fd7ef393-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 13:50:47 crc kubenswrapper[5039]: I1124 13:50:47.768340 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8dhbd\" (UniqueName: \"kubernetes.io/projected/15cc66e4-c047-4ab8-b10d-9e54fd7ef393-kube-api-access-8dhbd\") on node \"crc\" DevicePath \"\"" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.174860 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v" event={"ID":"15cc66e4-c047-4ab8-b10d-9e54fd7ef393","Type":"ContainerDied","Data":"a4a3bb4d3763b86d4eb5598cf0bfbd496b80bf8229c2cd7bf7043c9f6e3856a7"} Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.174919 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a4a3bb4d3763b86d4eb5598cf0bfbd496b80bf8229c2cd7bf7043c9f6e3856a7" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.175004 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.345801 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg"] Nov 24 13:50:48 crc kubenswrapper[5039]: E1124 13:50:48.346360 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15cc66e4-c047-4ab8-b10d-9e54fd7ef393" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.346388 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="15cc66e4-c047-4ab8-b10d-9e54fd7ef393" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.346694 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="15cc66e4-c047-4ab8-b10d-9e54fd7ef393" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.347652 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.356360 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg"] Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.379176 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.379338 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.379572 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.379695 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.380856 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7g7ts\" (UniqueName: \"kubernetes.io/projected/11aaad36-5e7f-4f08-b7fd-9a547c514331-kube-api-access-7g7ts\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg\" (UID: \"11aaad36-5e7f-4f08-b7fd-9a547c514331\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.381067 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/11aaad36-5e7f-4f08-b7fd-9a547c514331-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg\" (UID: \"11aaad36-5e7f-4f08-b7fd-9a547c514331\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.381220 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/11aaad36-5e7f-4f08-b7fd-9a547c514331-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg\" (UID: \"11aaad36-5e7f-4f08-b7fd-9a547c514331\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.482778 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/11aaad36-5e7f-4f08-b7fd-9a547c514331-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg\" (UID: \"11aaad36-5e7f-4f08-b7fd-9a547c514331\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.482869 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/11aaad36-5e7f-4f08-b7fd-9a547c514331-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg\" (UID: \"11aaad36-5e7f-4f08-b7fd-9a547c514331\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.482947 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7g7ts\" (UniqueName: \"kubernetes.io/projected/11aaad36-5e7f-4f08-b7fd-9a547c514331-kube-api-access-7g7ts\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg\" 
(UID: \"11aaad36-5e7f-4f08-b7fd-9a547c514331\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.488181 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/11aaad36-5e7f-4f08-b7fd-9a547c514331-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg\" (UID: \"11aaad36-5e7f-4f08-b7fd-9a547c514331\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.491242 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/11aaad36-5e7f-4f08-b7fd-9a547c514331-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg\" (UID: \"11aaad36-5e7f-4f08-b7fd-9a547c514331\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.509170 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7g7ts\" (UniqueName: \"kubernetes.io/projected/11aaad36-5e7f-4f08-b7fd-9a547c514331-kube-api-access-7g7ts\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg\" (UID: \"11aaad36-5e7f-4f08-b7fd-9a547c514331\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.678454 5039 scope.go:117] "RemoveContainer" containerID="19b11a9f9da4658334a3496647f8f469118a8cfc8fb444799ab27ff258767d7a" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.694301 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg" Nov 24 13:50:48 crc kubenswrapper[5039]: I1124 13:50:48.711810 5039 scope.go:117] "RemoveContainer" containerID="baa7abd18ef86b6add604ce2f46066004e539d770256de57e129ebb1f3aa059a" Nov 24 13:50:49 crc kubenswrapper[5039]: I1124 13:50:49.277867 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg"] Nov 24 13:50:50 crc kubenswrapper[5039]: I1124 13:50:50.037535 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-x49zw"] Nov 24 13:50:50 crc kubenswrapper[5039]: I1124 13:50:50.057737 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-x49zw"] Nov 24 13:50:50 crc kubenswrapper[5039]: I1124 13:50:50.101845 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:50:50 crc kubenswrapper[5039]: I1124 13:50:50.101908 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:50:50 crc kubenswrapper[5039]: I1124 13:50:50.197212 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg" 
event={"ID":"11aaad36-5e7f-4f08-b7fd-9a547c514331","Type":"ContainerStarted","Data":"4211afdf0c538b3ca8ab02a3ce90a6f40a1b50557616fe05f0a5ba51799a8c2c"} Nov 24 13:50:50 crc kubenswrapper[5039]: I1124 13:50:50.197260 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg" event={"ID":"11aaad36-5e7f-4f08-b7fd-9a547c514331","Type":"ContainerStarted","Data":"a4ea892efd1ad5ed8a0de1f06d4754076f60a4325ab701f61064ca8fa66948e6"} Nov 24 13:50:50 crc kubenswrapper[5039]: I1124 13:50:50.218242 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg" podStartSLOduration=1.779120893 podStartE2EDuration="2.218225008s" podCreationTimestamp="2025-11-24 13:50:48 +0000 UTC" firstStartedPulling="2025-11-24 13:50:49.284092078 +0000 UTC m=+1961.723216618" lastFinishedPulling="2025-11-24 13:50:49.723196233 +0000 UTC m=+1962.162320733" observedRunningTime="2025-11-24 13:50:50.211219327 +0000 UTC m=+1962.650343827" watchObservedRunningTime="2025-11-24 13:50:50.218225008 +0000 UTC m=+1962.657349498" Nov 24 13:50:50 crc kubenswrapper[5039]: I1124 13:50:50.319327 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ab794c0-2264-4041-b697-ef7829a5129a" path="/var/lib/kubelet/pods/4ab794c0-2264-4041-b697-ef7829a5129a/volumes" Nov 24 13:50:51 crc kubenswrapper[5039]: I1124 13:50:51.027369 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-wgc6v"] Nov 24 13:50:51 crc kubenswrapper[5039]: I1124 13:50:51.036186 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-wgc6v"] Nov 24 13:50:52 crc kubenswrapper[5039]: I1124 13:50:52.322431 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3938495-f119-4641-b76b-0333c1391b24" path="/var/lib/kubelet/pods/a3938495-f119-4641-b76b-0333c1391b24/volumes" Nov 24 13:50:54 crc kubenswrapper[5039]: I1124 13:50:54.035422 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-47dzd"] Nov 24 13:50:54 crc kubenswrapper[5039]: I1124 13:50:54.045476 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-vvjsm"] Nov 24 13:50:54 crc kubenswrapper[5039]: I1124 13:50:54.053383 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-47dzd"] Nov 24 13:50:54 crc kubenswrapper[5039]: I1124 13:50:54.062319 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-vvjsm"] Nov 24 13:50:54 crc kubenswrapper[5039]: I1124 13:50:54.336735 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c22dae7-e545-4eb0-9552-f3c691f397df" path="/var/lib/kubelet/pods/2c22dae7-e545-4eb0-9552-f3c691f397df/volumes" Nov 24 13:50:54 crc kubenswrapper[5039]: I1124 13:50:54.338441 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb2d453a-99e6-4593-ad2d-a57c7a2c2519" path="/var/lib/kubelet/pods/cb2d453a-99e6-4593-ad2d-a57c7a2c2519/volumes" Nov 24 13:51:20 crc kubenswrapper[5039]: I1124 13:51:20.101399 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:51:20 crc kubenswrapper[5039]: I1124 13:51:20.102042 5039 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:51:39 crc kubenswrapper[5039]: I1124 13:51:39.749292 5039 generic.go:334] "Generic (PLEG): container finished" podID="11aaad36-5e7f-4f08-b7fd-9a547c514331" containerID="4211afdf0c538b3ca8ab02a3ce90a6f40a1b50557616fe05f0a5ba51799a8c2c" exitCode=0 Nov 24 13:51:39 crc kubenswrapper[5039]: I1124 13:51:39.749349 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg" event={"ID":"11aaad36-5e7f-4f08-b7fd-9a547c514331","Type":"ContainerDied","Data":"4211afdf0c538b3ca8ab02a3ce90a6f40a1b50557616fe05f0a5ba51799a8c2c"} Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.230313 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg" Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.416157 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7g7ts\" (UniqueName: \"kubernetes.io/projected/11aaad36-5e7f-4f08-b7fd-9a547c514331-kube-api-access-7g7ts\") pod \"11aaad36-5e7f-4f08-b7fd-9a547c514331\" (UID: \"11aaad36-5e7f-4f08-b7fd-9a547c514331\") " Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.416235 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/11aaad36-5e7f-4f08-b7fd-9a547c514331-ssh-key\") pod \"11aaad36-5e7f-4f08-b7fd-9a547c514331\" (UID: \"11aaad36-5e7f-4f08-b7fd-9a547c514331\") " Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.416491 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/11aaad36-5e7f-4f08-b7fd-9a547c514331-inventory\") pod \"11aaad36-5e7f-4f08-b7fd-9a547c514331\" (UID: \"11aaad36-5e7f-4f08-b7fd-9a547c514331\") " Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.422674 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11aaad36-5e7f-4f08-b7fd-9a547c514331-kube-api-access-7g7ts" (OuterVolumeSpecName: "kube-api-access-7g7ts") pod "11aaad36-5e7f-4f08-b7fd-9a547c514331" (UID: "11aaad36-5e7f-4f08-b7fd-9a547c514331"). InnerVolumeSpecName "kube-api-access-7g7ts". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.449206 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11aaad36-5e7f-4f08-b7fd-9a547c514331-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "11aaad36-5e7f-4f08-b7fd-9a547c514331" (UID: "11aaad36-5e7f-4f08-b7fd-9a547c514331"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.472807 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11aaad36-5e7f-4f08-b7fd-9a547c514331-inventory" (OuterVolumeSpecName: "inventory") pod "11aaad36-5e7f-4f08-b7fd-9a547c514331" (UID: "11aaad36-5e7f-4f08-b7fd-9a547c514331"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.520182 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/11aaad36-5e7f-4f08-b7fd-9a547c514331-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.520226 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7g7ts\" (UniqueName: \"kubernetes.io/projected/11aaad36-5e7f-4f08-b7fd-9a547c514331-kube-api-access-7g7ts\") on node \"crc\" DevicePath \"\"" Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.520240 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/11aaad36-5e7f-4f08-b7fd-9a547c514331-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.772571 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg" event={"ID":"11aaad36-5e7f-4f08-b7fd-9a547c514331","Type":"ContainerDied","Data":"a4ea892efd1ad5ed8a0de1f06d4754076f60a4325ab701f61064ca8fa66948e6"} Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.772617 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a4ea892efd1ad5ed8a0de1f06d4754076f60a4325ab701f61064ca8fa66948e6" Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.772655 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg" Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.869283 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-vk2n7"] Nov 24 13:51:41 crc kubenswrapper[5039]: E1124 13:51:41.869780 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11aaad36-5e7f-4f08-b7fd-9a547c514331" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.869803 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="11aaad36-5e7f-4f08-b7fd-9a547c514331" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.870089 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="11aaad36-5e7f-4f08-b7fd-9a547c514331" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.870976 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-vk2n7" Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.873149 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.873688 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.873979 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.877169 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 13:51:41 crc kubenswrapper[5039]: I1124 13:51:41.889752 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-vk2n7"] Nov 24 13:51:42 crc kubenswrapper[5039]: I1124 13:51:42.029577 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zkbk\" (UniqueName: \"kubernetes.io/projected/6139740a-dbfc-41c6-baf0-9651b805c47c-kube-api-access-6zkbk\") pod \"ssh-known-hosts-edpm-deployment-vk2n7\" (UID: \"6139740a-dbfc-41c6-baf0-9651b805c47c\") " pod="openstack/ssh-known-hosts-edpm-deployment-vk2n7" Nov 24 13:51:42 crc kubenswrapper[5039]: I1124 13:51:42.029640 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6139740a-dbfc-41c6-baf0-9651b805c47c-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-vk2n7\" (UID: \"6139740a-dbfc-41c6-baf0-9651b805c47c\") " pod="openstack/ssh-known-hosts-edpm-deployment-vk2n7" Nov 24 13:51:42 crc kubenswrapper[5039]: I1124 13:51:42.029786 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/6139740a-dbfc-41c6-baf0-9651b805c47c-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-vk2n7\" (UID: \"6139740a-dbfc-41c6-baf0-9651b805c47c\") " pod="openstack/ssh-known-hosts-edpm-deployment-vk2n7" Nov 24 13:51:42 crc kubenswrapper[5039]: I1124 13:51:42.131741 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/6139740a-dbfc-41c6-baf0-9651b805c47c-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-vk2n7\" (UID: \"6139740a-dbfc-41c6-baf0-9651b805c47c\") " pod="openstack/ssh-known-hosts-edpm-deployment-vk2n7" Nov 24 13:51:42 crc kubenswrapper[5039]: I1124 13:51:42.131919 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zkbk\" (UniqueName: \"kubernetes.io/projected/6139740a-dbfc-41c6-baf0-9651b805c47c-kube-api-access-6zkbk\") pod \"ssh-known-hosts-edpm-deployment-vk2n7\" (UID: \"6139740a-dbfc-41c6-baf0-9651b805c47c\") " pod="openstack/ssh-known-hosts-edpm-deployment-vk2n7" Nov 24 13:51:42 crc kubenswrapper[5039]: I1124 13:51:42.131951 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6139740a-dbfc-41c6-baf0-9651b805c47c-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-vk2n7\" (UID: \"6139740a-dbfc-41c6-baf0-9651b805c47c\") " pod="openstack/ssh-known-hosts-edpm-deployment-vk2n7" Nov 24 13:51:42 crc 
kubenswrapper[5039]: I1124 13:51:42.135379 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6139740a-dbfc-41c6-baf0-9651b805c47c-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-vk2n7\" (UID: \"6139740a-dbfc-41c6-baf0-9651b805c47c\") " pod="openstack/ssh-known-hosts-edpm-deployment-vk2n7" Nov 24 13:51:42 crc kubenswrapper[5039]: I1124 13:51:42.137542 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/6139740a-dbfc-41c6-baf0-9651b805c47c-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-vk2n7\" (UID: \"6139740a-dbfc-41c6-baf0-9651b805c47c\") " pod="openstack/ssh-known-hosts-edpm-deployment-vk2n7" Nov 24 13:51:42 crc kubenswrapper[5039]: I1124 13:51:42.156296 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zkbk\" (UniqueName: \"kubernetes.io/projected/6139740a-dbfc-41c6-baf0-9651b805c47c-kube-api-access-6zkbk\") pod \"ssh-known-hosts-edpm-deployment-vk2n7\" (UID: \"6139740a-dbfc-41c6-baf0-9651b805c47c\") " pod="openstack/ssh-known-hosts-edpm-deployment-vk2n7" Nov 24 13:51:42 crc kubenswrapper[5039]: I1124 13:51:42.240878 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-vk2n7" Nov 24 13:51:42 crc kubenswrapper[5039]: I1124 13:51:42.792455 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-vk2n7"] Nov 24 13:51:43 crc kubenswrapper[5039]: I1124 13:51:43.042225 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-69e6-account-create-5kzxp"] Nov 24 13:51:43 crc kubenswrapper[5039]: I1124 13:51:43.054358 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-69e6-account-create-5kzxp"] Nov 24 13:51:43 crc kubenswrapper[5039]: I1124 13:51:43.802122 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-vk2n7" event={"ID":"6139740a-dbfc-41c6-baf0-9651b805c47c","Type":"ContainerStarted","Data":"63592d0398b308a4df971917ac5eebaed0aa2eb42a6d0d162d20f66958c1aa9a"} Nov 24 13:51:43 crc kubenswrapper[5039]: I1124 13:51:43.802415 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-vk2n7" event={"ID":"6139740a-dbfc-41c6-baf0-9651b805c47c","Type":"ContainerStarted","Data":"39abf97b782780bd10434522ecd210bb78eaf9621c89087f5fdfded286dd9639"} Nov 24 13:51:43 crc kubenswrapper[5039]: I1124 13:51:43.825144 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-vk2n7" podStartSLOduration=2.42789391 podStartE2EDuration="2.82512322s" podCreationTimestamp="2025-11-24 13:51:41 +0000 UTC" firstStartedPulling="2025-11-24 13:51:42.784629397 +0000 UTC m=+2015.223753897" lastFinishedPulling="2025-11-24 13:51:43.181858707 +0000 UTC m=+2015.620983207" observedRunningTime="2025-11-24 13:51:43.820757453 +0000 UTC m=+2016.259881953" watchObservedRunningTime="2025-11-24 13:51:43.82512322 +0000 UTC m=+2016.264247710" Nov 24 13:51:44 crc kubenswrapper[5039]: I1124 13:51:44.030014 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-2540-account-create-g8rgr"] Nov 24 13:51:44 crc kubenswrapper[5039]: I1124 13:51:44.038742 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-ba86-account-create-zsw6z"] Nov 24 13:51:44 
crc kubenswrapper[5039]: I1124 13:51:44.049666 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-7zj9p"] Nov 24 13:51:44 crc kubenswrapper[5039]: I1124 13:51:44.057574 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-bdq8h"] Nov 24 13:51:44 crc kubenswrapper[5039]: I1124 13:51:44.064988 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-5blmb"] Nov 24 13:51:44 crc kubenswrapper[5039]: I1124 13:51:44.072454 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-2540-account-create-g8rgr"] Nov 24 13:51:44 crc kubenswrapper[5039]: I1124 13:51:44.080133 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-ba86-account-create-zsw6z"] Nov 24 13:51:44 crc kubenswrapper[5039]: I1124 13:51:44.087306 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-bdq8h"] Nov 24 13:51:44 crc kubenswrapper[5039]: I1124 13:51:44.094809 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-7zj9p"] Nov 24 13:51:44 crc kubenswrapper[5039]: I1124 13:51:44.101973 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-5blmb"] Nov 24 13:51:44 crc kubenswrapper[5039]: I1124 13:51:44.320391 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11fd95de-91de-400f-a931-ca7339de0a76" path="/var/lib/kubelet/pods/11fd95de-91de-400f-a931-ca7339de0a76/volumes" Nov 24 13:51:44 crc kubenswrapper[5039]: I1124 13:51:44.321281 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b1111a8-04ac-478b-b1bf-557246566f05" path="/var/lib/kubelet/pods/4b1111a8-04ac-478b-b1bf-557246566f05/volumes" Nov 24 13:51:44 crc kubenswrapper[5039]: I1124 13:51:44.322032 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6721127c-79a1-4fd5-98db-0e99ff78de0e" path="/var/lib/kubelet/pods/6721127c-79a1-4fd5-98db-0e99ff78de0e/volumes" Nov 24 13:51:44 crc kubenswrapper[5039]: I1124 13:51:44.322775 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a18b1393-b27b-42f7-938d-cf3321f376d6" path="/var/lib/kubelet/pods/a18b1393-b27b-42f7-938d-cf3321f376d6/volumes" Nov 24 13:51:44 crc kubenswrapper[5039]: I1124 13:51:44.324253 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a41f29ac-696c-4aa9-aed7-b8959e15fa52" path="/var/lib/kubelet/pods/a41f29ac-696c-4aa9-aed7-b8959e15fa52/volumes" Nov 24 13:51:44 crc kubenswrapper[5039]: I1124 13:51:44.325257 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a73195a6-8449-41fa-ad7c-5ce086c264ec" path="/var/lib/kubelet/pods/a73195a6-8449-41fa-ad7c-5ce086c264ec/volumes" Nov 24 13:51:48 crc kubenswrapper[5039]: I1124 13:51:48.926860 5039 scope.go:117] "RemoveContainer" containerID="7c6c7338818ea35148780b97081efddcc9fc41f98cab5a074b848dee9ae97e8b" Nov 24 13:51:48 crc kubenswrapper[5039]: I1124 13:51:48.965473 5039 scope.go:117] "RemoveContainer" containerID="11f507bd49d43b06d4559e076aa96bed1718d9906ec6ca45d15e51ce44d771f4" Nov 24 13:51:49 crc kubenswrapper[5039]: I1124 13:51:49.037912 5039 scope.go:117] "RemoveContainer" containerID="d7d421dc29cae07402f5080b927654682301e6b252a7c213462072ff461081f2" Nov 24 13:51:49 crc kubenswrapper[5039]: I1124 13:51:49.088790 5039 scope.go:117] "RemoveContainer" containerID="36249bd63021c1e97ceee74e50e1de631a98586dfedf719d9a4b4afae3b296a9" Nov 24 13:51:49 crc kubenswrapper[5039]: 
I1124 13:51:49.150144 5039 scope.go:117] "RemoveContainer" containerID="647c2f85bd87a6d71e684d9b323535c07159bca744eaa84f2704934aace84ee0"
Nov 24 13:51:49 crc kubenswrapper[5039]: I1124 13:51:49.184874 5039 scope.go:117] "RemoveContainer" containerID="91f775a9e48154d99b12708d5794be3c4a30d39b15993c9d325891276817ad85"
Nov 24 13:51:49 crc kubenswrapper[5039]: I1124 13:51:49.250875 5039 scope.go:117] "RemoveContainer" containerID="77dc73bdbef96dc90c16a82371a584af473439a7be45247415e9a0b9c5dc44cc"
Nov 24 13:51:49 crc kubenswrapper[5039]: I1124 13:51:49.280842 5039 scope.go:117] "RemoveContainer" containerID="f3fe0c60386d097c989c6dbfd9c6b2c46f56109fc7c14e1eea13c0b07ccf2fc8"
Nov 24 13:51:49 crc kubenswrapper[5039]: I1124 13:51:49.304196 5039 scope.go:117] "RemoveContainer" containerID="e33183e69a2bed150c7c16dc7ff7d80a50190a0a951167491f30768b6e5f5a5e"
Nov 24 13:51:49 crc kubenswrapper[5039]: I1124 13:51:49.333660 5039 scope.go:117] "RemoveContainer" containerID="7f7eb8a637904516979cfa166420529ce38a51580cbe6d0f59544fbc11742b15"
Nov 24 13:51:50 crc kubenswrapper[5039]: I1124 13:51:50.101448 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 13:51:50 crc kubenswrapper[5039]: I1124 13:51:50.101530 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 13:51:50 crc kubenswrapper[5039]: I1124 13:51:50.101583 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg"
Nov 24 13:51:50 crc kubenswrapper[5039]: I1124 13:51:50.102326 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"43f9e17fdc829b04a1d158fb340e5b63c9b87b25d3decfdb862bbf4e2559df49"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 24 13:51:50 crc kubenswrapper[5039]: I1124 13:51:50.102383 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://43f9e17fdc829b04a1d158fb340e5b63c9b87b25d3decfdb862bbf4e2559df49" gracePeriod=600
Nov 24 13:51:50 crc kubenswrapper[5039]: I1124 13:51:50.892539 5039 generic.go:334] "Generic (PLEG): container finished" podID="6139740a-dbfc-41c6-baf0-9651b805c47c" containerID="63592d0398b308a4df971917ac5eebaed0aa2eb42a6d0d162d20f66958c1aa9a" exitCode=0
Nov 24 13:51:50 crc kubenswrapper[5039]: I1124 13:51:50.892646 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-vk2n7" event={"ID":"6139740a-dbfc-41c6-baf0-9651b805c47c","Type":"ContainerDied","Data":"63592d0398b308a4df971917ac5eebaed0aa2eb42a6d0d162d20f66958c1aa9a"}
Nov 24 13:51:50 crc kubenswrapper[5039]: I1124 13:51:50.898570 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="43f9e17fdc829b04a1d158fb340e5b63c9b87b25d3decfdb862bbf4e2559df49" exitCode=0
Nov 24 13:51:50 crc kubenswrapper[5039]: I1124 13:51:50.898643 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"43f9e17fdc829b04a1d158fb340e5b63c9b87b25d3decfdb862bbf4e2559df49"}
Nov 24 13:51:50 crc kubenswrapper[5039]: I1124 13:51:50.898705 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"}
Nov 24 13:51:50 crc kubenswrapper[5039]: I1124 13:51:50.898722 5039 scope.go:117] "RemoveContainer" containerID="d7208ac24050ea594dc2cb3334a8183ac3cb6514527d87e4a1ee2d2772153133"
Nov 24 13:51:52 crc kubenswrapper[5039]: I1124 13:51:52.461416 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-vk2n7"
Nov 24 13:51:52 crc kubenswrapper[5039]: I1124 13:51:52.597199 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6zkbk\" (UniqueName: \"kubernetes.io/projected/6139740a-dbfc-41c6-baf0-9651b805c47c-kube-api-access-6zkbk\") pod \"6139740a-dbfc-41c6-baf0-9651b805c47c\" (UID: \"6139740a-dbfc-41c6-baf0-9651b805c47c\") "
Nov 24 13:51:52 crc kubenswrapper[5039]: I1124 13:51:52.597417 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/6139740a-dbfc-41c6-baf0-9651b805c47c-inventory-0\") pod \"6139740a-dbfc-41c6-baf0-9651b805c47c\" (UID: \"6139740a-dbfc-41c6-baf0-9651b805c47c\") "
Nov 24 13:51:52 crc kubenswrapper[5039]: I1124 13:51:52.597461 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6139740a-dbfc-41c6-baf0-9651b805c47c-ssh-key-openstack-edpm-ipam\") pod \"6139740a-dbfc-41c6-baf0-9651b805c47c\" (UID: \"6139740a-dbfc-41c6-baf0-9651b805c47c\") "
Nov 24 13:51:52 crc kubenswrapper[5039]: I1124 13:51:52.605910 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6139740a-dbfc-41c6-baf0-9651b805c47c-kube-api-access-6zkbk" (OuterVolumeSpecName: "kube-api-access-6zkbk") pod "6139740a-dbfc-41c6-baf0-9651b805c47c" (UID: "6139740a-dbfc-41c6-baf0-9651b805c47c"). InnerVolumeSpecName "kube-api-access-6zkbk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:51:52 crc kubenswrapper[5039]: I1124 13:51:52.631267 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6139740a-dbfc-41c6-baf0-9651b805c47c-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "6139740a-dbfc-41c6-baf0-9651b805c47c" (UID: "6139740a-dbfc-41c6-baf0-9651b805c47c"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:51:52 crc kubenswrapper[5039]: I1124 13:51:52.633728 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6139740a-dbfc-41c6-baf0-9651b805c47c-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "6139740a-dbfc-41c6-baf0-9651b805c47c" (UID: "6139740a-dbfc-41c6-baf0-9651b805c47c"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:51:52 crc kubenswrapper[5039]: I1124 13:51:52.699815 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6zkbk\" (UniqueName: \"kubernetes.io/projected/6139740a-dbfc-41c6-baf0-9651b805c47c-kube-api-access-6zkbk\") on node \"crc\" DevicePath \"\""
Nov 24 13:51:52 crc kubenswrapper[5039]: I1124 13:51:52.699852 5039 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/6139740a-dbfc-41c6-baf0-9651b805c47c-inventory-0\") on node \"crc\" DevicePath \"\""
Nov 24 13:51:52 crc kubenswrapper[5039]: I1124 13:51:52.699862 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6139740a-dbfc-41c6-baf0-9651b805c47c-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Nov 24 13:51:52 crc kubenswrapper[5039]: I1124 13:51:52.926362 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-vk2n7" event={"ID":"6139740a-dbfc-41c6-baf0-9651b805c47c","Type":"ContainerDied","Data":"39abf97b782780bd10434522ecd210bb78eaf9621c89087f5fdfded286dd9639"}
Nov 24 13:51:52 crc kubenswrapper[5039]: I1124 13:51:52.926408 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39abf97b782780bd10434522ecd210bb78eaf9621c89087f5fdfded286dd9639"
Nov 24 13:51:52 crc kubenswrapper[5039]: I1124 13:51:52.926433 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-vk2n7"
Nov 24 13:51:53 crc kubenswrapper[5039]: I1124 13:51:53.034290 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f"]
Nov 24 13:51:53 crc kubenswrapper[5039]: E1124 13:51:53.034920 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6139740a-dbfc-41c6-baf0-9651b805c47c" containerName="ssh-known-hosts-edpm-deployment"
Nov 24 13:51:53 crc kubenswrapper[5039]: I1124 13:51:53.034940 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="6139740a-dbfc-41c6-baf0-9651b805c47c" containerName="ssh-known-hosts-edpm-deployment"
Nov 24 13:51:53 crc kubenswrapper[5039]: I1124 13:51:53.035203 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="6139740a-dbfc-41c6-baf0-9651b805c47c" containerName="ssh-known-hosts-edpm-deployment"
Nov 24 13:51:53 crc kubenswrapper[5039]: I1124 13:51:53.036375 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f"
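The machine-config-daemon records above show the kubelet's liveness prober failing with connection refused against http://127.0.0.1:8798/health and then killing the container with the pod's 600-second grace period; the restart appears as the ContainerDied/ContainerStarted PLEG pair. A minimal sketch of the equivalent check, assuming only the endpoint and failure mode visible in the log (the real probe's timeout and thresholds come from the pod spec, which this log does not show):

```go
// Illustrative only: reproduces the HTTP check behind the "Probe failed"
// lines above. Endpoint taken from the log; the 1s timeout is an assumption.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: time.Second}
	resp, err := client.Get("http://127.0.0.1:8798/health")
	if err != nil {
		// A down daemon yields "connect: connection refused", as logged.
		fmt.Println("liveness probe failure:", err)
		return
	}
	defer resp.Body.Close()
	// Kubernetes HTTP probes treat status codes 200-399 as success.
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		fmt.Println("liveness probe success:", resp.Status)
	} else {
		fmt.Println("liveness probe failure: status", resp.Status)
	}
}
```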
Nov 24 13:51:53 crc kubenswrapper[5039]: I1124 13:51:53.038790 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5"
Nov 24 13:51:53 crc kubenswrapper[5039]: I1124 13:51:53.039711 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 24 13:51:53 crc kubenswrapper[5039]: I1124 13:51:53.040488 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 24 13:51:53 crc kubenswrapper[5039]: I1124 13:51:53.044743 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 24 13:51:53 crc kubenswrapper[5039]: I1124 13:51:53.063863 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f"]
Nov 24 13:51:53 crc kubenswrapper[5039]: I1124 13:51:53.211583 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06972240-fdee-4d23-a066-5919ba1abd8c-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-g2n4f\" (UID: \"06972240-fdee-4d23-a066-5919ba1abd8c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f"
Nov 24 13:51:53 crc kubenswrapper[5039]: I1124 13:51:53.211772 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tz2b2\" (UniqueName: \"kubernetes.io/projected/06972240-fdee-4d23-a066-5919ba1abd8c-kube-api-access-tz2b2\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-g2n4f\" (UID: \"06972240-fdee-4d23-a066-5919ba1abd8c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f"
Nov 24 13:51:53 crc kubenswrapper[5039]: I1124 13:51:53.211964 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06972240-fdee-4d23-a066-5919ba1abd8c-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-g2n4f\" (UID: \"06972240-fdee-4d23-a066-5919ba1abd8c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f"
Nov 24 13:51:53 crc kubenswrapper[5039]: I1124 13:51:53.315716 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06972240-fdee-4d23-a066-5919ba1abd8c-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-g2n4f\" (UID: \"06972240-fdee-4d23-a066-5919ba1abd8c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f"
Nov 24 13:51:53 crc kubenswrapper[5039]: I1124 13:51:53.315812 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06972240-fdee-4d23-a066-5919ba1abd8c-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-g2n4f\" (UID: \"06972240-fdee-4d23-a066-5919ba1abd8c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f"
Nov 24 13:51:53 crc kubenswrapper[5039]: I1124 13:51:53.315914 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tz2b2\" (UniqueName: \"kubernetes.io/projected/06972240-fdee-4d23-a066-5919ba1abd8c-kube-api-access-tz2b2\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-g2n4f\" (UID: \"06972240-fdee-4d23-a066-5919ba1abd8c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f"
Nov 24 13:51:53 crc kubenswrapper[5039]: I1124 13:51:53.321392 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06972240-fdee-4d23-a066-5919ba1abd8c-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-g2n4f\" (UID: \"06972240-fdee-4d23-a066-5919ba1abd8c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f"
Nov 24 13:51:53 crc kubenswrapper[5039]: I1124 13:51:53.321680 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06972240-fdee-4d23-a066-5919ba1abd8c-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-g2n4f\" (UID: \"06972240-fdee-4d23-a066-5919ba1abd8c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f"
Nov 24 13:51:53 crc kubenswrapper[5039]: I1124 13:51:53.348095 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tz2b2\" (UniqueName: \"kubernetes.io/projected/06972240-fdee-4d23-a066-5919ba1abd8c-kube-api-access-tz2b2\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-g2n4f\" (UID: \"06972240-fdee-4d23-a066-5919ba1abd8c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f"
Nov 24 13:51:53 crc kubenswrapper[5039]: I1124 13:51:53.359117 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f"
Nov 24 13:51:53 crc kubenswrapper[5039]: W1124 13:51:53.961991 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod06972240_fdee_4d23_a066_5919ba1abd8c.slice/crio-2caccd5d094a715fce73ff7e92d8bc4df9ad28314a563ce4c57454ff25dae834 WatchSource:0}: Error finding container 2caccd5d094a715fce73ff7e92d8bc4df9ad28314a563ce4c57454ff25dae834: Status 404 returned error can't find the container with id 2caccd5d094a715fce73ff7e92d8bc4df9ad28314a563ce4c57454ff25dae834
Nov 24 13:51:53 crc kubenswrapper[5039]: I1124 13:51:53.965306 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f"]
Nov 24 13:51:54 crc kubenswrapper[5039]: I1124 13:51:54.946444 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f" event={"ID":"06972240-fdee-4d23-a066-5919ba1abd8c","Type":"ContainerStarted","Data":"d767563b027f2775f56ec9462a59266a1b5d4e7da66a0cf000043e69842282a3"}
Nov 24 13:51:54 crc kubenswrapper[5039]: I1124 13:51:54.947557 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f" event={"ID":"06972240-fdee-4d23-a066-5919ba1abd8c","Type":"ContainerStarted","Data":"2caccd5d094a715fce73ff7e92d8bc4df9ad28314a563ce4c57454ff25dae834"}
Nov 24 13:51:54 crc kubenswrapper[5039]: I1124 13:51:54.972197 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f" podStartSLOduration=2.571803793 podStartE2EDuration="2.972177211s" podCreationTimestamp="2025-11-24 13:51:52 +0000 UTC" firstStartedPulling="2025-11-24 13:51:53.96447471 +0000 UTC m=+2026.403599210" lastFinishedPulling="2025-11-24 13:51:54.364848128 +0000 UTC m=+2026.803972628" observedRunningTime="2025-11-24 13:51:54.968192844 +0000 UTC m=+2027.407317414" watchObservedRunningTime="2025-11-24 13:51:54.972177211 +0000 UTC m=+2027.411301711"
Nov 24 13:52:03 crc kubenswrapper[5039]: I1124 13:52:03.225323 5039 generic.go:334] "Generic (PLEG): container finished" podID="06972240-fdee-4d23-a066-5919ba1abd8c" containerID="d767563b027f2775f56ec9462a59266a1b5d4e7da66a0cf000043e69842282a3" exitCode=0
Nov 24 13:52:03 crc kubenswrapper[5039]: I1124 13:52:03.225417 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f" event={"ID":"06972240-fdee-4d23-a066-5919ba1abd8c","Type":"ContainerDied","Data":"d767563b027f2775f56ec9462a59266a1b5d4e7da66a0cf000043e69842282a3"}
Nov 24 13:52:04 crc kubenswrapper[5039]: I1124 13:52:04.760050 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f"
Nov 24 13:52:04 crc kubenswrapper[5039]: I1124 13:52:04.869593 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06972240-fdee-4d23-a066-5919ba1abd8c-inventory\") pod \"06972240-fdee-4d23-a066-5919ba1abd8c\" (UID: \"06972240-fdee-4d23-a066-5919ba1abd8c\") "
Nov 24 13:52:04 crc kubenswrapper[5039]: I1124 13:52:04.869914 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06972240-fdee-4d23-a066-5919ba1abd8c-ssh-key\") pod \"06972240-fdee-4d23-a066-5919ba1abd8c\" (UID: \"06972240-fdee-4d23-a066-5919ba1abd8c\") "
Nov 24 13:52:04 crc kubenswrapper[5039]: I1124 13:52:04.869977 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tz2b2\" (UniqueName: \"kubernetes.io/projected/06972240-fdee-4d23-a066-5919ba1abd8c-kube-api-access-tz2b2\") pod \"06972240-fdee-4d23-a066-5919ba1abd8c\" (UID: \"06972240-fdee-4d23-a066-5919ba1abd8c\") "
Nov 24 13:52:04 crc kubenswrapper[5039]: I1124 13:52:04.880198 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06972240-fdee-4d23-a066-5919ba1abd8c-kube-api-access-tz2b2" (OuterVolumeSpecName: "kube-api-access-tz2b2") pod "06972240-fdee-4d23-a066-5919ba1abd8c" (UID: "06972240-fdee-4d23-a066-5919ba1abd8c"). InnerVolumeSpecName "kube-api-access-tz2b2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:52:04 crc kubenswrapper[5039]: I1124 13:52:04.917703 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06972240-fdee-4d23-a066-5919ba1abd8c-inventory" (OuterVolumeSpecName: "inventory") pod "06972240-fdee-4d23-a066-5919ba1abd8c" (UID: "06972240-fdee-4d23-a066-5919ba1abd8c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:52:04 crc kubenswrapper[5039]: I1124 13:52:04.921650 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06972240-fdee-4d23-a066-5919ba1abd8c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "06972240-fdee-4d23-a066-5919ba1abd8c" (UID: "06972240-fdee-4d23-a066-5919ba1abd8c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
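The pod_startup_latency_tracker record for run-os-edpm-deployment-openstack-edpm-ipam-g2n4f encodes a small piece of arithmetic: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration works out to that figure minus the time spent pulling images (lastFinishedPulling minus firstStartedPulling). A short sketch reproducing both numbers from the logged timestamps; the SLO relationship is inferred from the numbers themselves, not taken from kubelet source:

```go
// Reproduces the two durations in the startup-latency record above.
package main

import (
	"fmt"
	"time"
)

const layout = "2006-01-02 15:04:05 -0700 MST" // Go accepts the fractional seconds on parse

func mustParse(s string) time.Time {
	t, err := time.Parse(layout, s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2025-11-24 13:51:52 +0000 UTC")             // podCreationTimestamp
	firstPull := mustParse("2025-11-24 13:51:53.96447471 +0000 UTC")  // firstStartedPulling
	lastPull := mustParse("2025-11-24 13:51:54.364848128 +0000 UTC")  // lastFinishedPulling
	running := mustParse("2025-11-24 13:51:54.972177211 +0000 UTC")   // watchObservedRunningTime

	e2e := running.Sub(created)
	pull := lastPull.Sub(firstPull)
	fmt.Println("podStartE2EDuration:", e2e)      // 2.972177211s, as logged
	fmt.Println("image pull time:", pull)         // 400.373418ms
	fmt.Println("podStartSLOduration:", e2e-pull) // 2.571803793s, as logged
}
```

The later records for reboot-os (3.301497807s minus 0.832621156s of pulling = 2.468876651) fit the same relationship.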
Nov 24 13:52:04 crc kubenswrapper[5039]: I1124 13:52:04.972259 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06972240-fdee-4d23-a066-5919ba1abd8c-inventory\") on node \"crc\" DevicePath \"\""
Nov 24 13:52:04 crc kubenswrapper[5039]: I1124 13:52:04.972511 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06972240-fdee-4d23-a066-5919ba1abd8c-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 24 13:52:04 crc kubenswrapper[5039]: I1124 13:52:04.972582 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tz2b2\" (UniqueName: \"kubernetes.io/projected/06972240-fdee-4d23-a066-5919ba1abd8c-kube-api-access-tz2b2\") on node \"crc\" DevicePath \"\""
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.248179 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f" event={"ID":"06972240-fdee-4d23-a066-5919ba1abd8c","Type":"ContainerDied","Data":"2caccd5d094a715fce73ff7e92d8bc4df9ad28314a563ce4c57454ff25dae834"}
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.248528 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2caccd5d094a715fce73ff7e92d8bc4df9ad28314a563ce4c57454ff25dae834"
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.248213 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f"
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.330416 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg"]
Nov 24 13:52:05 crc kubenswrapper[5039]: E1124 13:52:05.331233 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06972240-fdee-4d23-a066-5919ba1abd8c" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.331354 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="06972240-fdee-4d23-a066-5919ba1abd8c" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.331794 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="06972240-fdee-4d23-a066-5919ba1abd8c" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.332836 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg"
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.336294 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.336337 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.338194 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.338354 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5"
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.340764 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg"]
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.485800 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krpxb\" (UniqueName: \"kubernetes.io/projected/5a0cc535-f6a2-4a08-acfc-fa0c605359bd-kube-api-access-krpxb\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg\" (UID: \"5a0cc535-f6a2-4a08-acfc-fa0c605359bd\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg"
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.485969 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5a0cc535-f6a2-4a08-acfc-fa0c605359bd-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg\" (UID: \"5a0cc535-f6a2-4a08-acfc-fa0c605359bd\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg"
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.486120 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5a0cc535-f6a2-4a08-acfc-fa0c605359bd-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg\" (UID: \"5a0cc535-f6a2-4a08-acfc-fa0c605359bd\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg"
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.588578 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5a0cc535-f6a2-4a08-acfc-fa0c605359bd-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg\" (UID: \"5a0cc535-f6a2-4a08-acfc-fa0c605359bd\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg"
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.588865 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krpxb\" (UniqueName: \"kubernetes.io/projected/5a0cc535-f6a2-4a08-acfc-fa0c605359bd-kube-api-access-krpxb\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg\" (UID: \"5a0cc535-f6a2-4a08-acfc-fa0c605359bd\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg"
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.589044 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5a0cc535-f6a2-4a08-acfc-fa0c605359bd-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg\" (UID: \"5a0cc535-f6a2-4a08-acfc-fa0c605359bd\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg"
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.592245 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5a0cc535-f6a2-4a08-acfc-fa0c605359bd-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg\" (UID: \"5a0cc535-f6a2-4a08-acfc-fa0c605359bd\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg"
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.594009 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5a0cc535-f6a2-4a08-acfc-fa0c605359bd-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg\" (UID: \"5a0cc535-f6a2-4a08-acfc-fa0c605359bd\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg"
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.605149 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krpxb\" (UniqueName: \"kubernetes.io/projected/5a0cc535-f6a2-4a08-acfc-fa0c605359bd-kube-api-access-krpxb\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg\" (UID: \"5a0cc535-f6a2-4a08-acfc-fa0c605359bd\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg"
Nov 24 13:52:05 crc kubenswrapper[5039]: I1124 13:52:05.652199 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg"
Nov 24 13:52:06 crc kubenswrapper[5039]: I1124 13:52:06.180206 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg"]
Nov 24 13:52:06 crc kubenswrapper[5039]: I1124 13:52:06.257036 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg" event={"ID":"5a0cc535-f6a2-4a08-acfc-fa0c605359bd","Type":"ContainerStarted","Data":"bd0fde478c0291ffbd71b75c8e89460362248d3ee751e81c31595fffe73f37ab"}
Nov 24 13:52:08 crc kubenswrapper[5039]: I1124 13:52:08.278235 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg" event={"ID":"5a0cc535-f6a2-4a08-acfc-fa0c605359bd","Type":"ContainerStarted","Data":"eda5c96b2c1990611eae5b33f4c3e26a4b1481705c37f1f2b0d0a548f56cc181"}
Nov 24 13:52:08 crc kubenswrapper[5039]: I1124 13:52:08.301543 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg" podStartSLOduration=2.468876651 podStartE2EDuration="3.301497807s" podCreationTimestamp="2025-11-24 13:52:05 +0000 UTC" firstStartedPulling="2025-11-24 13:52:06.190334102 +0000 UTC m=+2038.629458602" lastFinishedPulling="2025-11-24 13:52:07.022955258 +0000 UTC m=+2039.462079758" observedRunningTime="2025-11-24 13:52:08.290666491 +0000 UTC m=+2040.729791011" watchObservedRunningTime="2025-11-24 13:52:08.301497807 +0000 UTC m=+2040.740622327"
Nov 24 13:52:17 crc kubenswrapper[5039]: I1124 13:52:17.367379 5039 generic.go:334] "Generic (PLEG): container finished" podID="5a0cc535-f6a2-4a08-acfc-fa0c605359bd" containerID="eda5c96b2c1990611eae5b33f4c3e26a4b1481705c37f1f2b0d0a548f56cc181" exitCode=0
Nov 24 13:52:17 crc kubenswrapper[5039]: I1124 13:52:17.367421 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg" event={"ID":"5a0cc535-f6a2-4a08-acfc-fa0c605359bd","Type":"ContainerDied","Data":"eda5c96b2c1990611eae5b33f4c3e26a4b1481705c37f1f2b0d0a548f56cc181"}
event={"ID":"5a0cc535-f6a2-4a08-acfc-fa0c605359bd","Type":"ContainerDied","Data":"eda5c96b2c1990611eae5b33f4c3e26a4b1481705c37f1f2b0d0a548f56cc181"} Nov 24 13:52:18 crc kubenswrapper[5039]: I1124 13:52:18.924609 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg" Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.070848 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5a0cc535-f6a2-4a08-acfc-fa0c605359bd-inventory\") pod \"5a0cc535-f6a2-4a08-acfc-fa0c605359bd\" (UID: \"5a0cc535-f6a2-4a08-acfc-fa0c605359bd\") " Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.070937 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5a0cc535-f6a2-4a08-acfc-fa0c605359bd-ssh-key\") pod \"5a0cc535-f6a2-4a08-acfc-fa0c605359bd\" (UID: \"5a0cc535-f6a2-4a08-acfc-fa0c605359bd\") " Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.070964 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-krpxb\" (UniqueName: \"kubernetes.io/projected/5a0cc535-f6a2-4a08-acfc-fa0c605359bd-kube-api-access-krpxb\") pod \"5a0cc535-f6a2-4a08-acfc-fa0c605359bd\" (UID: \"5a0cc535-f6a2-4a08-acfc-fa0c605359bd\") " Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.077069 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a0cc535-f6a2-4a08-acfc-fa0c605359bd-kube-api-access-krpxb" (OuterVolumeSpecName: "kube-api-access-krpxb") pod "5a0cc535-f6a2-4a08-acfc-fa0c605359bd" (UID: "5a0cc535-f6a2-4a08-acfc-fa0c605359bd"). InnerVolumeSpecName "kube-api-access-krpxb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.116269 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a0cc535-f6a2-4a08-acfc-fa0c605359bd-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5a0cc535-f6a2-4a08-acfc-fa0c605359bd" (UID: "5a0cc535-f6a2-4a08-acfc-fa0c605359bd"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.130541 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a0cc535-f6a2-4a08-acfc-fa0c605359bd-inventory" (OuterVolumeSpecName: "inventory") pod "5a0cc535-f6a2-4a08-acfc-fa0c605359bd" (UID: "5a0cc535-f6a2-4a08-acfc-fa0c605359bd"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.175462 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5a0cc535-f6a2-4a08-acfc-fa0c605359bd-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.175520 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5a0cc535-f6a2-4a08-acfc-fa0c605359bd-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.175533 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-krpxb\" (UniqueName: \"kubernetes.io/projected/5a0cc535-f6a2-4a08-acfc-fa0c605359bd-kube-api-access-krpxb\") on node \"crc\" DevicePath \"\"" Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.392154 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg" event={"ID":"5a0cc535-f6a2-4a08-acfc-fa0c605359bd","Type":"ContainerDied","Data":"bd0fde478c0291ffbd71b75c8e89460362248d3ee751e81c31595fffe73f37ab"} Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.392189 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd0fde478c0291ffbd71b75c8e89460362248d3ee751e81c31595fffe73f37ab" Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.392287 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg" Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.527033 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"] Nov 24 13:52:19 crc kubenswrapper[5039]: E1124 13:52:19.527496 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a0cc535-f6a2-4a08-acfc-fa0c605359bd" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.527536 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a0cc535-f6a2-4a08-acfc-fa0c605359bd" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.527734 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a0cc535-f6a2-4a08-acfc-fa0c605359bd" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.528410 5039 util.go:30] "No sandbox for pod can be found. 
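By this point the same lifecycle has repeated for ssh-known-hosts, run-os, and reboot-os: SyncLoop ADD, mounts, ContainerStarted, exit 0, unmounts, sandbox teardown, then the next job's ADD. When auditing a long run it can help to reduce the log to just those transitions; a small sketch that reads lines like the ones above on stdin, assuming only the pod="..." and event "Type" fields that actually appear in these records:

```go
// Reduces kubelet PLEG event lines to "pod <tab> transition" pairs.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

var (
	podRe  = regexp.MustCompile(`pod="([^"]+)"`)
	typeRe = regexp.MustCompile(`"Type":"(ContainerStarted|ContainerDied)"`)
)

func main() {
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // journal lines can be long
	for sc.Scan() {
		line := sc.Text()
		t := typeRe.FindStringSubmatch(line)
		if t == nil {
			continue // not a PLEG event line
		}
		if p := podRe.FindStringSubmatch(line); p != nil {
			fmt.Printf("%s\t%s\n", p[1], t[1])
		}
	}
}
```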
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.532289 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.532480 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.532300 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.532313 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.532853 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.532975 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.534227 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.536248 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.547641 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"]
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.583461 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.583965 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.584013 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.584083 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.584126 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.584177 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.584220 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.584246 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.584348 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.584388 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnkh9\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-kube-api-access-dnkh9\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.584417 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.584452 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.584510 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.685935 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.686001 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.686073 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.686819 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.686901 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.686961 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.686995 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.687095 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.687157 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnkh9\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-kube-api-access-dnkh9\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.687204 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.687258 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.687322 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.687371 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.693368 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.693408 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.694585 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.694827 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.695753 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.698232 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.698235 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.698603 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.699366 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.699380 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.699814 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.702548 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.714347 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnkh9\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-kube-api-access-dnkh9\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:19 crc kubenswrapper[5039]: I1124 13:52:19.853573 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"
Nov 24 13:52:20 crc kubenswrapper[5039]: I1124 13:52:20.353932 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5xn2s"]
Nov 24 13:52:20 crc kubenswrapper[5039]: I1124 13:52:20.357199 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5xn2s"
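install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z mounts noticeably more than the earlier job pods: alongside ssh-key, inventory, and its service-account token it takes one projected *-default-certs-0 volume per service (ovn, libvirt, telemetry, telemetry-power-monitoring) plus the matching *-combined-ca-bundle secrets. A sketch that groups the volume names from the records above by the plugin named in their UniqueName prefix, exactly as logged:

```go
// Groups the install-certs pod's volumes (names copied from the records
// above) by plugin; iteration order of the maps is not significant.
package main

import "fmt"

func main() {
	volumes := map[string]string{ // volume name -> plugin, as logged
		"ssh-key":                       "kubernetes.io/secret",
		"inventory":                     "kubernetes.io/secret",
		"kube-api-access-dnkh9":         "kubernetes.io/projected",
		"bootstrap-combined-ca-bundle":  "kubernetes.io/secret",
		"repo-setup-combined-ca-bundle": "kubernetes.io/secret",
		"ovn-combined-ca-bundle":        "kubernetes.io/secret",
		"libvirt-combined-ca-bundle":    "kubernetes.io/secret",
		"telemetry-combined-ca-bundle":  "kubernetes.io/secret",
		"telemetry-power-monitoring-combined-ca-bundle":                  "kubernetes.io/secret",
		"openstack-edpm-ipam-ovn-default-certs-0":                        "kubernetes.io/projected",
		"openstack-edpm-ipam-libvirt-default-certs-0":                    "kubernetes.io/projected",
		"openstack-edpm-ipam-telemetry-default-certs-0":                  "kubernetes.io/projected",
		"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0": "kubernetes.io/projected",
	}
	byPlugin := map[string][]string{}
	for name, plugin := range volumes {
		byPlugin[plugin] = append(byPlugin[plugin], name)
	}
	for plugin, names := range byPlugin {
		fmt.Printf("%s: %d volumes %v\n", plugin, len(names), names)
	}
}
```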
Nov 24 13:52:20 crc kubenswrapper[5039]: I1124 13:52:20.370824 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5xn2s"]
Nov 24 13:52:20 crc kubenswrapper[5039]: I1124 13:52:20.457436 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"]
Nov 24 13:52:20 crc kubenswrapper[5039]: I1124 13:52:20.502863 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/097ba984-0064-465d-9d4e-dd7ede89c065-catalog-content\") pod \"community-operators-5xn2s\" (UID: \"097ba984-0064-465d-9d4e-dd7ede89c065\") " pod="openshift-marketplace/community-operators-5xn2s"
Nov 24 13:52:20 crc kubenswrapper[5039]: I1124 13:52:20.503044 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9v99r\" (UniqueName: \"kubernetes.io/projected/097ba984-0064-465d-9d4e-dd7ede89c065-kube-api-access-9v99r\") pod \"community-operators-5xn2s\" (UID: \"097ba984-0064-465d-9d4e-dd7ede89c065\") " pod="openshift-marketplace/community-operators-5xn2s"
Nov 24 13:52:20 crc kubenswrapper[5039]: I1124 13:52:20.503109 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/097ba984-0064-465d-9d4e-dd7ede89c065-utilities\") pod \"community-operators-5xn2s\" (UID: \"097ba984-0064-465d-9d4e-dd7ede89c065\") " pod="openshift-marketplace/community-operators-5xn2s"
Nov 24 13:52:20 crc kubenswrapper[5039]: I1124 13:52:20.604473 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9v99r\" (UniqueName: \"kubernetes.io/projected/097ba984-0064-465d-9d4e-dd7ede89c065-kube-api-access-9v99r\") pod \"community-operators-5xn2s\" (UID: \"097ba984-0064-465d-9d4e-dd7ede89c065\") " pod="openshift-marketplace/community-operators-5xn2s"
Nov 24 13:52:20 crc kubenswrapper[5039]: I1124 13:52:20.604588 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/097ba984-0064-465d-9d4e-dd7ede89c065-utilities\") pod \"community-operators-5xn2s\" (UID: \"097ba984-0064-465d-9d4e-dd7ede89c065\") " pod="openshift-marketplace/community-operators-5xn2s"
Nov 24 13:52:20 crc kubenswrapper[5039]: I1124 13:52:20.604685 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/097ba984-0064-465d-9d4e-dd7ede89c065-catalog-content\") pod \"community-operators-5xn2s\" (UID: \"097ba984-0064-465d-9d4e-dd7ede89c065\") " pod="openshift-marketplace/community-operators-5xn2s"
Nov 24 13:52:20 crc kubenswrapper[5039]: I1124 13:52:20.605213 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/097ba984-0064-465d-9d4e-dd7ede89c065-catalog-content\") pod \"community-operators-5xn2s\" (UID: \"097ba984-0064-465d-9d4e-dd7ede89c065\") " pod="openshift-marketplace/community-operators-5xn2s"
Nov 24 13:52:20 crc kubenswrapper[5039]: I1124 13:52:20.605885 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/097ba984-0064-465d-9d4e-dd7ede89c065-utilities\") pod \"community-operators-5xn2s\" (UID: \"097ba984-0064-465d-9d4e-dd7ede89c065\") " pod="openshift-marketplace/community-operators-5xn2s"
Nov 24 13:52:20 crc kubenswrapper[5039]: I1124 13:52:20.633763 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9v99r\" (UniqueName: \"kubernetes.io/projected/097ba984-0064-465d-9d4e-dd7ede89c065-kube-api-access-9v99r\") pod \"community-operators-5xn2s\" (UID: \"097ba984-0064-465d-9d4e-dd7ede89c065\") " pod="openshift-marketplace/community-operators-5xn2s"
Nov 24 13:52:20 crc kubenswrapper[5039]: I1124 13:52:20.680694 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5xn2s"
Nov 24 13:52:21 crc kubenswrapper[5039]: W1124 13:52:21.191308 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod097ba984_0064_465d_9d4e_dd7ede89c065.slice/crio-d6bfe2ffb03c61414ea8fad7700070dfffffca0c1978edd18f387c3102dfcfe9 WatchSource:0}: Error finding container d6bfe2ffb03c61414ea8fad7700070dfffffca0c1978edd18f387c3102dfcfe9: Status 404 returned error can't find the container with id d6bfe2ffb03c61414ea8fad7700070dfffffca0c1978edd18f387c3102dfcfe9
Nov 24 13:52:21 crc kubenswrapper[5039]: I1124 13:52:21.196253 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5xn2s"]
Nov 24 13:52:21 crc kubenswrapper[5039]: I1124 13:52:21.417100 5039 generic.go:334] "Generic (PLEG): container finished" podID="097ba984-0064-465d-9d4e-dd7ede89c065" containerID="a7d7e1c6c27d27ba8ba51590ef27563be93e3bc0cb651032c4b39221ae60babd" exitCode=0
Nov 24 13:52:21 crc kubenswrapper[5039]: I1124 13:52:21.417156 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5xn2s" event={"ID":"097ba984-0064-465d-9d4e-dd7ede89c065","Type":"ContainerDied","Data":"a7d7e1c6c27d27ba8ba51590ef27563be93e3bc0cb651032c4b39221ae60babd"}
Nov 24 13:52:21 crc kubenswrapper[5039]: I1124 13:52:21.417569 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5xn2s" event={"ID":"097ba984-0064-465d-9d4e-dd7ede89c065","Type":"ContainerStarted","Data":"d6bfe2ffb03c61414ea8fad7700070dfffffca0c1978edd18f387c3102dfcfe9"}
Nov 24 13:52:21 crc kubenswrapper[5039]: I1124 13:52:21.420263 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z" event={"ID":"3fe12f60-6522-4328-883c-2d2d05054d9e","Type":"ContainerStarted","Data":"df11dfed910cec147071faceefe217c350bc3d6c5205b50b5b4c6107402c0c04"}
Nov 24 13:52:21 crc kubenswrapper[5039]: I1124 13:52:21.420301 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z" event={"ID":"3fe12f60-6522-4328-883c-2d2d05054d9e","Type":"ContainerStarted","Data":"80d9f83f889e6db5ee0ab984231a3ecc6bb0ad9e874f057dfb69199963b8fef3"}
Nov 24 13:52:21 crc kubenswrapper[5039]: I1124 13:52:21.482766 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z" podStartSLOduration=2.000401844 podStartE2EDuration="2.482745598s" podCreationTimestamp="2025-11-24 13:52:19 +0000 UTC" firstStartedPulling="2025-11-24 13:52:20.463252609 +0000 UTC m=+2052.902377109" lastFinishedPulling="2025-11-24 13:52:20.945596353 +0000 UTC m=+2053.384720863" observedRunningTime="2025-11-24 13:52:21.462091253 +0000 UTC m=+2053.901215753" watchObservedRunningTime="2025-11-24 13:52:21.482745598 +0000 UTC m=+2053.921870108"
Nov 24 13:52:22 crc kubenswrapper[5039]: I1124 13:52:22.431434 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5xn2s" event={"ID":"097ba984-0064-465d-9d4e-dd7ede89c065","Type":"ContainerStarted","Data":"7965a3ad3afa3bcbc4882b63da53da3572020e80ea0c7236ae7aff392a9e1cdf"}
Nov 24 13:52:23 crc kubenswrapper[5039]: I1124 13:52:23.443886 5039 generic.go:334] "Generic (PLEG): container finished" podID="097ba984-0064-465d-9d4e-dd7ede89c065" containerID="7965a3ad3afa3bcbc4882b63da53da3572020e80ea0c7236ae7aff392a9e1cdf" exitCode=0
Nov 24 13:52:23 crc kubenswrapper[5039]: I1124 13:52:23.443938 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5xn2s" event={"ID":"097ba984-0064-465d-9d4e-dd7ede89c065","Type":"ContainerDied","Data":"7965a3ad3afa3bcbc4882b63da53da3572020e80ea0c7236ae7aff392a9e1cdf"}
Nov 24 13:52:24 crc kubenswrapper[5039]: I1124 13:52:24.048398 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-qn652"]
Nov 24 13:52:24 crc kubenswrapper[5039]: I1124 13:52:24.056999 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-qn652"]
Nov 24 13:52:24 crc kubenswrapper[5039]: I1124 13:52:24.320778 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a720981-59f4-4a6c-bc6a-ea08f5aa101b" path="/var/lib/kubelet/pods/5a720981-59f4-4a6c-bc6a-ea08f5aa101b/volumes"
Nov 24 13:52:24 crc kubenswrapper[5039]: I1124 13:52:24.467171 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5xn2s" event={"ID":"097ba984-0064-465d-9d4e-dd7ede89c065","Type":"ContainerStarted","Data":"09d977062523b2a5f31187e2d23dba3b16ad44bb8ea9bbc8141693c63f67d00d"}
Nov 24 13:52:24 crc kubenswrapper[5039]: I1124 13:52:24.486584 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5xn2s" podStartSLOduration=2.056680183 podStartE2EDuration="4.486564678s" podCreationTimestamp="2025-11-24 13:52:20 +0000 UTC" firstStartedPulling="2025-11-24 13:52:21.419422028 +0000 UTC m=+2053.858546528" lastFinishedPulling="2025-11-24 13:52:23.849306523 +0000 UTC m=+2056.288431023" observedRunningTime="2025-11-24 13:52:24.484103217 +0000 UTC m=+2056.923227747" watchObservedRunningTime="2025-11-24 13:52:24.486564678 +0000 UTC m=+2056.925689178"
Nov 24 13:52:27 crc kubenswrapper[5039]: I1124 13:52:27.032956 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-wzr9w"]
Nov 24 13:52:27 crc kubenswrapper[5039]: I1124 13:52:27.042727 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-wzr9w"]
Nov 24 13:52:28 crc kubenswrapper[5039]: I1124 13:52:28.038874 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-8ba9-account-create-qkpgl"]
Nov 24 13:52:28 crc kubenswrapper[5039]: I1124 13:52:28.050223 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-8ba9-account-create-qkpgl"]
Nov 24 13:52:28 crc kubenswrapper[5039]: I1124 13:52:28.319588 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="725180e7-824d-4133-ba16-8be24fa96cc9" path="/var/lib/kubelet/pods/725180e7-824d-4133-ba16-8be24fa96cc9/volumes"
Nov 24 13:52:28 crc kubenswrapper[5039]: I1124 13:52:28.320565 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc53aceb-f577-4036-88f5-76d8fe736cb0" path="/var/lib/kubelet/pods/dc53aceb-f577-4036-88f5-76d8fe736cb0/volumes"
Nov 24 13:52:30 crc kubenswrapper[5039]: I1124 13:52:30.681315 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5xn2s"
Nov 24 13:52:30 crc kubenswrapper[5039]: I1124 13:52:30.681679 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5xn2s"
Nov 24 13:52:30 crc kubenswrapper[5039]: I1124 13:52:30.737360 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5xn2s"
Nov 24 13:52:31 crc kubenswrapper[5039]: I1124 13:52:31.611614 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5xn2s"
Nov 24 13:52:31 crc kubenswrapper[5039]: I1124 13:52:31.676144 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5xn2s"]
Nov 24 13:52:33 crc kubenswrapper[5039]: I1124 13:52:33.570175 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5xn2s" podUID="097ba984-0064-465d-9d4e-dd7ede89c065" containerName="registry-server" containerID="cri-o://09d977062523b2a5f31187e2d23dba3b16ad44bb8ea9bbc8141693c63f67d00d" gracePeriod=2
Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.087766 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5xn2s"
Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.201308 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9v99r\" (UniqueName: \"kubernetes.io/projected/097ba984-0064-465d-9d4e-dd7ede89c065-kube-api-access-9v99r\") pod \"097ba984-0064-465d-9d4e-dd7ede89c065\" (UID: \"097ba984-0064-465d-9d4e-dd7ede89c065\") "
Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.201369 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/097ba984-0064-465d-9d4e-dd7ede89c065-catalog-content\") pod \"097ba984-0064-465d-9d4e-dd7ede89c065\" (UID: \"097ba984-0064-465d-9d4e-dd7ede89c065\") "
Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.201547 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/097ba984-0064-465d-9d4e-dd7ede89c065-utilities\") pod \"097ba984-0064-465d-9d4e-dd7ede89c065\" (UID: \"097ba984-0064-465d-9d4e-dd7ede89c065\") "
Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.202336 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/097ba984-0064-465d-9d4e-dd7ede89c065-utilities" (OuterVolumeSpecName: "utilities") pod "097ba984-0064-465d-9d4e-dd7ede89c065" (UID: "097ba984-0064-465d-9d4e-dd7ede89c065"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.206873 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/097ba984-0064-465d-9d4e-dd7ede89c065-kube-api-access-9v99r" (OuterVolumeSpecName: "kube-api-access-9v99r") pod "097ba984-0064-465d-9d4e-dd7ede89c065" (UID: "097ba984-0064-465d-9d4e-dd7ede89c065"). InnerVolumeSpecName "kube-api-access-9v99r". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.248400 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/097ba984-0064-465d-9d4e-dd7ede89c065-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "097ba984-0064-465d-9d4e-dd7ede89c065" (UID: "097ba984-0064-465d-9d4e-dd7ede89c065"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.305044 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9v99r\" (UniqueName: \"kubernetes.io/projected/097ba984-0064-465d-9d4e-dd7ede89c065-kube-api-access-9v99r\") on node \"crc\" DevicePath \"\"" Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.305101 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/097ba984-0064-465d-9d4e-dd7ede89c065-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.305113 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/097ba984-0064-465d-9d4e-dd7ede89c065-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.581868 5039 generic.go:334] "Generic (PLEG): container finished" podID="097ba984-0064-465d-9d4e-dd7ede89c065" containerID="09d977062523b2a5f31187e2d23dba3b16ad44bb8ea9bbc8141693c63f67d00d" exitCode=0 Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.581924 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5xn2s" event={"ID":"097ba984-0064-465d-9d4e-dd7ede89c065","Type":"ContainerDied","Data":"09d977062523b2a5f31187e2d23dba3b16ad44bb8ea9bbc8141693c63f67d00d"} Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.581946 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5xn2s" Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.581960 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5xn2s" event={"ID":"097ba984-0064-465d-9d4e-dd7ede89c065","Type":"ContainerDied","Data":"d6bfe2ffb03c61414ea8fad7700070dfffffca0c1978edd18f387c3102dfcfe9"} Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.582030 5039 scope.go:117] "RemoveContainer" containerID="09d977062523b2a5f31187e2d23dba3b16ad44bb8ea9bbc8141693c63f67d00d" Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.613189 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5xn2s"] Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.614258 5039 scope.go:117] "RemoveContainer" containerID="7965a3ad3afa3bcbc4882b63da53da3572020e80ea0c7236ae7aff392a9e1cdf" Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.629571 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5xn2s"] Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.637962 5039 scope.go:117] "RemoveContainer" containerID="a7d7e1c6c27d27ba8ba51590ef27563be93e3bc0cb651032c4b39221ae60babd" Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.681992 5039 scope.go:117] "RemoveContainer" containerID="09d977062523b2a5f31187e2d23dba3b16ad44bb8ea9bbc8141693c63f67d00d" Nov 24 13:52:34 crc kubenswrapper[5039]: E1124 13:52:34.682682 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09d977062523b2a5f31187e2d23dba3b16ad44bb8ea9bbc8141693c63f67d00d\": container with ID starting with 09d977062523b2a5f31187e2d23dba3b16ad44bb8ea9bbc8141693c63f67d00d not found: ID does not exist" containerID="09d977062523b2a5f31187e2d23dba3b16ad44bb8ea9bbc8141693c63f67d00d" Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.682726 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09d977062523b2a5f31187e2d23dba3b16ad44bb8ea9bbc8141693c63f67d00d"} err="failed to get container status \"09d977062523b2a5f31187e2d23dba3b16ad44bb8ea9bbc8141693c63f67d00d\": rpc error: code = NotFound desc = could not find container \"09d977062523b2a5f31187e2d23dba3b16ad44bb8ea9bbc8141693c63f67d00d\": container with ID starting with 09d977062523b2a5f31187e2d23dba3b16ad44bb8ea9bbc8141693c63f67d00d not found: ID does not exist" Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.682753 5039 scope.go:117] "RemoveContainer" containerID="7965a3ad3afa3bcbc4882b63da53da3572020e80ea0c7236ae7aff392a9e1cdf" Nov 24 13:52:34 crc kubenswrapper[5039]: E1124 13:52:34.683099 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7965a3ad3afa3bcbc4882b63da53da3572020e80ea0c7236ae7aff392a9e1cdf\": container with ID starting with 7965a3ad3afa3bcbc4882b63da53da3572020e80ea0c7236ae7aff392a9e1cdf not found: ID does not exist" containerID="7965a3ad3afa3bcbc4882b63da53da3572020e80ea0c7236ae7aff392a9e1cdf" Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.683129 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7965a3ad3afa3bcbc4882b63da53da3572020e80ea0c7236ae7aff392a9e1cdf"} err="failed to get container status \"7965a3ad3afa3bcbc4882b63da53da3572020e80ea0c7236ae7aff392a9e1cdf\": rpc error: code = NotFound desc = could not find 
container \"7965a3ad3afa3bcbc4882b63da53da3572020e80ea0c7236ae7aff392a9e1cdf\": container with ID starting with 7965a3ad3afa3bcbc4882b63da53da3572020e80ea0c7236ae7aff392a9e1cdf not found: ID does not exist" Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.683146 5039 scope.go:117] "RemoveContainer" containerID="a7d7e1c6c27d27ba8ba51590ef27563be93e3bc0cb651032c4b39221ae60babd" Nov 24 13:52:34 crc kubenswrapper[5039]: E1124 13:52:34.683967 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7d7e1c6c27d27ba8ba51590ef27563be93e3bc0cb651032c4b39221ae60babd\": container with ID starting with a7d7e1c6c27d27ba8ba51590ef27563be93e3bc0cb651032c4b39221ae60babd not found: ID does not exist" containerID="a7d7e1c6c27d27ba8ba51590ef27563be93e3bc0cb651032c4b39221ae60babd" Nov 24 13:52:34 crc kubenswrapper[5039]: I1124 13:52:34.684009 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7d7e1c6c27d27ba8ba51590ef27563be93e3bc0cb651032c4b39221ae60babd"} err="failed to get container status \"a7d7e1c6c27d27ba8ba51590ef27563be93e3bc0cb651032c4b39221ae60babd\": rpc error: code = NotFound desc = could not find container \"a7d7e1c6c27d27ba8ba51590ef27563be93e3bc0cb651032c4b39221ae60babd\": container with ID starting with a7d7e1c6c27d27ba8ba51590ef27563be93e3bc0cb651032c4b39221ae60babd not found: ID does not exist" Nov 24 13:52:36 crc kubenswrapper[5039]: I1124 13:52:36.321141 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="097ba984-0064-465d-9d4e-dd7ede89c065" path="/var/lib/kubelet/pods/097ba984-0064-465d-9d4e-dd7ede89c065/volumes" Nov 24 13:52:49 crc kubenswrapper[5039]: I1124 13:52:49.545473 5039 scope.go:117] "RemoveContainer" containerID="0bc29d7ec77e50a067302d1c7c1db679819a0c41054ca499b314238e31599ff7" Nov 24 13:52:49 crc kubenswrapper[5039]: I1124 13:52:49.590036 5039 scope.go:117] "RemoveContainer" containerID="650a92f781e12917b226edab0e88c2c82ba420834c78e13cf0c3dae5a421285f" Nov 24 13:52:49 crc kubenswrapper[5039]: I1124 13:52:49.623029 5039 scope.go:117] "RemoveContainer" containerID="133af9a7fdccba325874a1ce4edf2046dd1cfb39304d042d2247df286632b160" Nov 24 13:52:50 crc kubenswrapper[5039]: I1124 13:52:50.037229 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-dv6r5"] Nov 24 13:52:50 crc kubenswrapper[5039]: I1124 13:52:50.045269 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-dv6r5"] Nov 24 13:52:50 crc kubenswrapper[5039]: I1124 13:52:50.329286 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5558fd75-638f-4d1e-b0d8-e8e071471415" path="/var/lib/kubelet/pods/5558fd75-638f-4d1e-b0d8-e8e071471415/volumes" Nov 24 13:52:52 crc kubenswrapper[5039]: I1124 13:52:52.038149 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-968s2"] Nov 24 13:52:52 crc kubenswrapper[5039]: I1124 13:52:52.054837 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-968s2"] Nov 24 13:52:52 crc kubenswrapper[5039]: I1124 13:52:52.330519 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d757ee4-2d14-4644-9323-955537ee639e" path="/var/lib/kubelet/pods/9d757ee4-2d14-4644-9323-955537ee639e/volumes" Nov 24 13:52:58 crc kubenswrapper[5039]: I1124 13:52:58.856182 5039 generic.go:334] "Generic (PLEG): container finished" 
podID="3fe12f60-6522-4328-883c-2d2d05054d9e" containerID="df11dfed910cec147071faceefe217c350bc3d6c5205b50b5b4c6107402c0c04" exitCode=0 Nov 24 13:52:58 crc kubenswrapper[5039]: I1124 13:52:58.856272 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z" event={"ID":"3fe12f60-6522-4328-883c-2d2d05054d9e","Type":"ContainerDied","Data":"df11dfed910cec147071faceefe217c350bc3d6c5205b50b5b4c6107402c0c04"} Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.334472 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.389700 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-bootstrap-combined-ca-bundle\") pod \"3fe12f60-6522-4328-883c-2d2d05054d9e\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.390962 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-ovn-combined-ca-bundle\") pod \"3fe12f60-6522-4328-883c-2d2d05054d9e\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.391025 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-telemetry-power-monitoring-combined-ca-bundle\") pod \"3fe12f60-6522-4328-883c-2d2d05054d9e\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.391047 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"3fe12f60-6522-4328-883c-2d2d05054d9e\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.391385 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-libvirt-combined-ca-bundle\") pod \"3fe12f60-6522-4328-883c-2d2d05054d9e\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.391418 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-ssh-key\") pod \"3fe12f60-6522-4328-883c-2d2d05054d9e\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.391443 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-ovn-default-certs-0\") pod \"3fe12f60-6522-4328-883c-2d2d05054d9e\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.391478 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-inventory\") pod \"3fe12f60-6522-4328-883c-2d2d05054d9e\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.391517 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"3fe12f60-6522-4328-883c-2d2d05054d9e\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.391599 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"3fe12f60-6522-4328-883c-2d2d05054d9e\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.391671 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-telemetry-combined-ca-bundle\") pod \"3fe12f60-6522-4328-883c-2d2d05054d9e\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.391699 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dnkh9\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-kube-api-access-dnkh9\") pod \"3fe12f60-6522-4328-883c-2d2d05054d9e\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.391725 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-repo-setup-combined-ca-bundle\") pod \"3fe12f60-6522-4328-883c-2d2d05054d9e\" (UID: \"3fe12f60-6522-4328-883c-2d2d05054d9e\") " Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.397549 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0") pod "3fe12f60-6522-4328-883c-2d2d05054d9e" (UID: "3fe12f60-6522-4328-883c-2d2d05054d9e"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.397972 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "3fe12f60-6522-4328-883c-2d2d05054d9e" (UID: "3fe12f60-6522-4328-883c-2d2d05054d9e"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.398040 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "3fe12f60-6522-4328-883c-2d2d05054d9e" (UID: "3fe12f60-6522-4328-883c-2d2d05054d9e"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.400344 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "3fe12f60-6522-4328-883c-2d2d05054d9e" (UID: "3fe12f60-6522-4328-883c-2d2d05054d9e"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.400613 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "3fe12f60-6522-4328-883c-2d2d05054d9e" (UID: "3fe12f60-6522-4328-883c-2d2d05054d9e"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.402978 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-kube-api-access-dnkh9" (OuterVolumeSpecName: "kube-api-access-dnkh9") pod "3fe12f60-6522-4328-883c-2d2d05054d9e" (UID: "3fe12f60-6522-4328-883c-2d2d05054d9e"). InnerVolumeSpecName "kube-api-access-dnkh9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.404048 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "3fe12f60-6522-4328-883c-2d2d05054d9e" (UID: "3fe12f60-6522-4328-883c-2d2d05054d9e"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.409010 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "3fe12f60-6522-4328-883c-2d2d05054d9e" (UID: "3fe12f60-6522-4328-883c-2d2d05054d9e"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.410820 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "3fe12f60-6522-4328-883c-2d2d05054d9e" (UID: "3fe12f60-6522-4328-883c-2d2d05054d9e"). InnerVolumeSpecName "telemetry-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.416732 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "3fe12f60-6522-4328-883c-2d2d05054d9e" (UID: "3fe12f60-6522-4328-883c-2d2d05054d9e"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.421548 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "3fe12f60-6522-4328-883c-2d2d05054d9e" (UID: "3fe12f60-6522-4328-883c-2d2d05054d9e"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.436301 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-inventory" (OuterVolumeSpecName: "inventory") pod "3fe12f60-6522-4328-883c-2d2d05054d9e" (UID: "3fe12f60-6522-4328-883c-2d2d05054d9e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.442122 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3fe12f60-6522-4328-883c-2d2d05054d9e" (UID: "3fe12f60-6522-4328-883c-2d2d05054d9e"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.495962 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.495993 5039 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.496008 5039 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.496021 5039 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.496032 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dnkh9\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-kube-api-access-dnkh9\") on node \"crc\" DevicePath \"\"" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.496041 5039 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.496051 5039 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.496060 5039 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.496069 5039 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.496079 5039 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.496089 5039 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.496096 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/3fe12f60-6522-4328-883c-2d2d05054d9e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.496105 5039 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fe12f60-6522-4328-883c-2d2d05054d9e-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.881855 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z" event={"ID":"3fe12f60-6522-4328-883c-2d2d05054d9e","Type":"ContainerDied","Data":"80d9f83f889e6db5ee0ab984231a3ecc6bb0ad9e874f057dfb69199963b8fef3"} Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.881893 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="80d9f83f889e6db5ee0ab984231a3ecc6bb0ad9e874f057dfb69199963b8fef3" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.882140 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.981222 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw"] Nov 24 13:53:00 crc kubenswrapper[5039]: E1124 13:53:00.981695 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="097ba984-0064-465d-9d4e-dd7ede89c065" containerName="extract-content" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.981708 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="097ba984-0064-465d-9d4e-dd7ede89c065" containerName="extract-content" Nov 24 13:53:00 crc kubenswrapper[5039]: E1124 13:53:00.981734 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3fe12f60-6522-4328-883c-2d2d05054d9e" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.981743 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="3fe12f60-6522-4328-883c-2d2d05054d9e" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 24 13:53:00 crc kubenswrapper[5039]: E1124 13:53:00.981753 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="097ba984-0064-465d-9d4e-dd7ede89c065" containerName="registry-server" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.981760 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="097ba984-0064-465d-9d4e-dd7ede89c065" containerName="registry-server" Nov 24 13:53:00 crc kubenswrapper[5039]: E1124 13:53:00.981768 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="097ba984-0064-465d-9d4e-dd7ede89c065" containerName="extract-utilities" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.981774 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="097ba984-0064-465d-9d4e-dd7ede89c065" containerName="extract-utilities" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.981992 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="3fe12f60-6522-4328-883c-2d2d05054d9e" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.982037 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="097ba984-0064-465d-9d4e-dd7ede89c065" containerName="registry-server" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.983869 5039 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.987756 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.988127 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.988325 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.988494 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.994032 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 13:53:00 crc kubenswrapper[5039]: I1124 13:53:00.996913 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw"] Nov 24 13:53:01 crc kubenswrapper[5039]: I1124 13:53:01.004765 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-k56rw\" (UID: \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" Nov 24 13:53:01 crc kubenswrapper[5039]: I1124 13:53:01.004816 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-k56rw\" (UID: \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" Nov 24 13:53:01 crc kubenswrapper[5039]: I1124 13:53:01.004887 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-k56rw\" (UID: \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" Nov 24 13:53:01 crc kubenswrapper[5039]: I1124 13:53:01.004942 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pmqsx\" (UniqueName: \"kubernetes.io/projected/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-kube-api-access-pmqsx\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-k56rw\" (UID: \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" Nov 24 13:53:01 crc kubenswrapper[5039]: I1124 13:53:01.005076 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-k56rw\" (UID: \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" Nov 24 13:53:01 crc kubenswrapper[5039]: I1124 13:53:01.233064 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-k56rw\" (UID: \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" Nov 24 13:53:01 crc kubenswrapper[5039]: I1124 13:53:01.233131 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-k56rw\" (UID: \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" Nov 24 13:53:01 crc kubenswrapper[5039]: I1124 13:53:01.233216 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-k56rw\" (UID: \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" Nov 24 13:53:01 crc kubenswrapper[5039]: I1124 13:53:01.233265 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pmqsx\" (UniqueName: \"kubernetes.io/projected/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-kube-api-access-pmqsx\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-k56rw\" (UID: \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" Nov 24 13:53:01 crc kubenswrapper[5039]: I1124 13:53:01.233407 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-k56rw\" (UID: \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" Nov 24 13:53:01 crc kubenswrapper[5039]: I1124 13:53:01.236222 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-k56rw\" (UID: \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" Nov 24 13:53:01 crc kubenswrapper[5039]: I1124 13:53:01.238104 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-k56rw\" (UID: \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" Nov 24 13:53:01 crc kubenswrapper[5039]: I1124 13:53:01.238564 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-k56rw\" (UID: \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" Nov 24 13:53:01 crc kubenswrapper[5039]: I1124 13:53:01.248831 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-k56rw\" (UID: 
\"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" Nov 24 13:53:01 crc kubenswrapper[5039]: I1124 13:53:01.267373 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pmqsx\" (UniqueName: \"kubernetes.io/projected/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-kube-api-access-pmqsx\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-k56rw\" (UID: \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" Nov 24 13:53:01 crc kubenswrapper[5039]: I1124 13:53:01.355007 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" Nov 24 13:53:01 crc kubenswrapper[5039]: I1124 13:53:01.881636 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw"] Nov 24 13:53:01 crc kubenswrapper[5039]: I1124 13:53:01.892066 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" event={"ID":"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784","Type":"ContainerStarted","Data":"faf392c0d57d1baef3742a7b29c53b753fb686c055164654823c68084929318f"} Nov 24 13:53:02 crc kubenswrapper[5039]: I1124 13:53:02.907445 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" event={"ID":"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784","Type":"ContainerStarted","Data":"3a38d5d38903093c1f9fb37a359cb88145b981cc1729e11b4c7aedfb73c61f5a"} Nov 24 13:53:02 crc kubenswrapper[5039]: I1124 13:53:02.929041 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" podStartSLOduration=2.509917545 podStartE2EDuration="2.929018535s" podCreationTimestamp="2025-11-24 13:53:00 +0000 UTC" firstStartedPulling="2025-11-24 13:53:01.880470586 +0000 UTC m=+2094.319595086" lastFinishedPulling="2025-11-24 13:53:02.299571556 +0000 UTC m=+2094.738696076" observedRunningTime="2025-11-24 13:53:02.924621818 +0000 UTC m=+2095.363746318" watchObservedRunningTime="2025-11-24 13:53:02.929018535 +0000 UTC m=+2095.368143035" Nov 24 13:53:13 crc kubenswrapper[5039]: I1124 13:53:13.057646 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-c2r8x"] Nov 24 13:53:13 crc kubenswrapper[5039]: I1124 13:53:13.061159 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c2r8x" Nov 24 13:53:13 crc kubenswrapper[5039]: I1124 13:53:13.079631 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-c2r8x"] Nov 24 13:53:13 crc kubenswrapper[5039]: I1124 13:53:13.204694 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2515f7f-0627-494c-a4ae-dbea100c89ee-utilities\") pod \"redhat-marketplace-c2r8x\" (UID: \"b2515f7f-0627-494c-a4ae-dbea100c89ee\") " pod="openshift-marketplace/redhat-marketplace-c2r8x" Nov 24 13:53:13 crc kubenswrapper[5039]: I1124 13:53:13.204777 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2515f7f-0627-494c-a4ae-dbea100c89ee-catalog-content\") pod \"redhat-marketplace-c2r8x\" (UID: \"b2515f7f-0627-494c-a4ae-dbea100c89ee\") " pod="openshift-marketplace/redhat-marketplace-c2r8x" Nov 24 13:53:13 crc kubenswrapper[5039]: I1124 13:53:13.204894 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlc8v\" (UniqueName: \"kubernetes.io/projected/b2515f7f-0627-494c-a4ae-dbea100c89ee-kube-api-access-hlc8v\") pod \"redhat-marketplace-c2r8x\" (UID: \"b2515f7f-0627-494c-a4ae-dbea100c89ee\") " pod="openshift-marketplace/redhat-marketplace-c2r8x" Nov 24 13:53:13 crc kubenswrapper[5039]: I1124 13:53:13.306620 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2515f7f-0627-494c-a4ae-dbea100c89ee-utilities\") pod \"redhat-marketplace-c2r8x\" (UID: \"b2515f7f-0627-494c-a4ae-dbea100c89ee\") " pod="openshift-marketplace/redhat-marketplace-c2r8x" Nov 24 13:53:13 crc kubenswrapper[5039]: I1124 13:53:13.306691 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2515f7f-0627-494c-a4ae-dbea100c89ee-catalog-content\") pod \"redhat-marketplace-c2r8x\" (UID: \"b2515f7f-0627-494c-a4ae-dbea100c89ee\") " pod="openshift-marketplace/redhat-marketplace-c2r8x" Nov 24 13:53:13 crc kubenswrapper[5039]: I1124 13:53:13.306783 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlc8v\" (UniqueName: \"kubernetes.io/projected/b2515f7f-0627-494c-a4ae-dbea100c89ee-kube-api-access-hlc8v\") pod \"redhat-marketplace-c2r8x\" (UID: \"b2515f7f-0627-494c-a4ae-dbea100c89ee\") " pod="openshift-marketplace/redhat-marketplace-c2r8x" Nov 24 13:53:13 crc kubenswrapper[5039]: I1124 13:53:13.307693 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2515f7f-0627-494c-a4ae-dbea100c89ee-utilities\") pod \"redhat-marketplace-c2r8x\" (UID: \"b2515f7f-0627-494c-a4ae-dbea100c89ee\") " pod="openshift-marketplace/redhat-marketplace-c2r8x" Nov 24 13:53:13 crc kubenswrapper[5039]: I1124 13:53:13.307910 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2515f7f-0627-494c-a4ae-dbea100c89ee-catalog-content\") pod \"redhat-marketplace-c2r8x\" (UID: \"b2515f7f-0627-494c-a4ae-dbea100c89ee\") " pod="openshift-marketplace/redhat-marketplace-c2r8x" Nov 24 13:53:13 crc kubenswrapper[5039]: I1124 13:53:13.334281 5039 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-hlc8v\" (UniqueName: \"kubernetes.io/projected/b2515f7f-0627-494c-a4ae-dbea100c89ee-kube-api-access-hlc8v\") pod \"redhat-marketplace-c2r8x\" (UID: \"b2515f7f-0627-494c-a4ae-dbea100c89ee\") " pod="openshift-marketplace/redhat-marketplace-c2r8x" Nov 24 13:53:13 crc kubenswrapper[5039]: I1124 13:53:13.382004 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c2r8x" Nov 24 13:53:13 crc kubenswrapper[5039]: I1124 13:53:13.923575 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-c2r8x"] Nov 24 13:53:14 crc kubenswrapper[5039]: I1124 13:53:14.027169 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c2r8x" event={"ID":"b2515f7f-0627-494c-a4ae-dbea100c89ee","Type":"ContainerStarted","Data":"bdee8cebfd59d4fcfc79f3313d993ba63dc40ea01815681ed9941ea138b1c447"} Nov 24 13:53:15 crc kubenswrapper[5039]: I1124 13:53:15.037767 5039 generic.go:334] "Generic (PLEG): container finished" podID="b2515f7f-0627-494c-a4ae-dbea100c89ee" containerID="1e68103d1fc7c6f7b28a2f887afab4da9446e8e5f7c2011569aa1d35fe6c3a68" exitCode=0 Nov 24 13:53:15 crc kubenswrapper[5039]: I1124 13:53:15.037945 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c2r8x" event={"ID":"b2515f7f-0627-494c-a4ae-dbea100c89ee","Type":"ContainerDied","Data":"1e68103d1fc7c6f7b28a2f887afab4da9446e8e5f7c2011569aa1d35fe6c3a68"} Nov 24 13:53:16 crc kubenswrapper[5039]: I1124 13:53:16.050742 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c2r8x" event={"ID":"b2515f7f-0627-494c-a4ae-dbea100c89ee","Type":"ContainerStarted","Data":"bb1b18defc0f35e140c39ca0b4fbe3853bbf65ba1b1cbc1d40219ad6d1efaedc"} Nov 24 13:53:17 crc kubenswrapper[5039]: I1124 13:53:17.065196 5039 generic.go:334] "Generic (PLEG): container finished" podID="b2515f7f-0627-494c-a4ae-dbea100c89ee" containerID="bb1b18defc0f35e140c39ca0b4fbe3853bbf65ba1b1cbc1d40219ad6d1efaedc" exitCode=0 Nov 24 13:53:17 crc kubenswrapper[5039]: I1124 13:53:17.065259 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c2r8x" event={"ID":"b2515f7f-0627-494c-a4ae-dbea100c89ee","Type":"ContainerDied","Data":"bb1b18defc0f35e140c39ca0b4fbe3853bbf65ba1b1cbc1d40219ad6d1efaedc"} Nov 24 13:53:18 crc kubenswrapper[5039]: I1124 13:53:18.092262 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c2r8x" event={"ID":"b2515f7f-0627-494c-a4ae-dbea100c89ee","Type":"ContainerStarted","Data":"8303eb209ed87c8062bbdf6d6d0b39ed8e99b459b69157e032ede29b1806fb51"} Nov 24 13:53:18 crc kubenswrapper[5039]: I1124 13:53:18.111305 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-c2r8x" podStartSLOduration=2.4574294119999998 podStartE2EDuration="5.111288551s" podCreationTimestamp="2025-11-24 13:53:13 +0000 UTC" firstStartedPulling="2025-11-24 13:53:15.03945638 +0000 UTC m=+2107.478580880" lastFinishedPulling="2025-11-24 13:53:17.693315479 +0000 UTC m=+2110.132440019" observedRunningTime="2025-11-24 13:53:18.110364329 +0000 UTC m=+2110.549488829" watchObservedRunningTime="2025-11-24 13:53:18.111288551 +0000 UTC m=+2110.550413051" Nov 24 13:53:23 crc kubenswrapper[5039]: I1124 13:53:23.384676 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-marketplace-c2r8x" Nov 24 13:53:23 crc kubenswrapper[5039]: I1124 13:53:23.385560 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-c2r8x" Nov 24 13:53:23 crc kubenswrapper[5039]: I1124 13:53:23.436741 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-c2r8x" Nov 24 13:53:24 crc kubenswrapper[5039]: I1124 13:53:24.217683 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-c2r8x" Nov 24 13:53:24 crc kubenswrapper[5039]: I1124 13:53:24.281982 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-c2r8x"] Nov 24 13:53:26 crc kubenswrapper[5039]: I1124 13:53:26.182682 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-c2r8x" podUID="b2515f7f-0627-494c-a4ae-dbea100c89ee" containerName="registry-server" containerID="cri-o://8303eb209ed87c8062bbdf6d6d0b39ed8e99b459b69157e032ede29b1806fb51" gracePeriod=2 Nov 24 13:53:26 crc kubenswrapper[5039]: I1124 13:53:26.632097 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c2r8x" Nov 24 13:53:26 crc kubenswrapper[5039]: I1124 13:53:26.722606 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2515f7f-0627-494c-a4ae-dbea100c89ee-utilities\") pod \"b2515f7f-0627-494c-a4ae-dbea100c89ee\" (UID: \"b2515f7f-0627-494c-a4ae-dbea100c89ee\") " Nov 24 13:53:26 crc kubenswrapper[5039]: I1124 13:53:26.722666 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlc8v\" (UniqueName: \"kubernetes.io/projected/b2515f7f-0627-494c-a4ae-dbea100c89ee-kube-api-access-hlc8v\") pod \"b2515f7f-0627-494c-a4ae-dbea100c89ee\" (UID: \"b2515f7f-0627-494c-a4ae-dbea100c89ee\") " Nov 24 13:53:26 crc kubenswrapper[5039]: I1124 13:53:26.722835 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2515f7f-0627-494c-a4ae-dbea100c89ee-catalog-content\") pod \"b2515f7f-0627-494c-a4ae-dbea100c89ee\" (UID: \"b2515f7f-0627-494c-a4ae-dbea100c89ee\") " Nov 24 13:53:26 crc kubenswrapper[5039]: I1124 13:53:26.723662 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2515f7f-0627-494c-a4ae-dbea100c89ee-utilities" (OuterVolumeSpecName: "utilities") pod "b2515f7f-0627-494c-a4ae-dbea100c89ee" (UID: "b2515f7f-0627-494c-a4ae-dbea100c89ee"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:53:26 crc kubenswrapper[5039]: I1124 13:53:26.727814 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2515f7f-0627-494c-a4ae-dbea100c89ee-kube-api-access-hlc8v" (OuterVolumeSpecName: "kube-api-access-hlc8v") pod "b2515f7f-0627-494c-a4ae-dbea100c89ee" (UID: "b2515f7f-0627-494c-a4ae-dbea100c89ee"). InnerVolumeSpecName "kube-api-access-hlc8v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:53:26 crc kubenswrapper[5039]: I1124 13:53:26.744053 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2515f7f-0627-494c-a4ae-dbea100c89ee-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b2515f7f-0627-494c-a4ae-dbea100c89ee" (UID: "b2515f7f-0627-494c-a4ae-dbea100c89ee"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:53:26 crc kubenswrapper[5039]: I1124 13:53:26.825980 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2515f7f-0627-494c-a4ae-dbea100c89ee-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 13:53:26 crc kubenswrapper[5039]: I1124 13:53:26.826020 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlc8v\" (UniqueName: \"kubernetes.io/projected/b2515f7f-0627-494c-a4ae-dbea100c89ee-kube-api-access-hlc8v\") on node \"crc\" DevicePath \"\"" Nov 24 13:53:26 crc kubenswrapper[5039]: I1124 13:53:26.826034 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2515f7f-0627-494c-a4ae-dbea100c89ee-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 13:53:27 crc kubenswrapper[5039]: I1124 13:53:27.200485 5039 generic.go:334] "Generic (PLEG): container finished" podID="b2515f7f-0627-494c-a4ae-dbea100c89ee" containerID="8303eb209ed87c8062bbdf6d6d0b39ed8e99b459b69157e032ede29b1806fb51" exitCode=0 Nov 24 13:53:27 crc kubenswrapper[5039]: I1124 13:53:27.200543 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c2r8x" event={"ID":"b2515f7f-0627-494c-a4ae-dbea100c89ee","Type":"ContainerDied","Data":"8303eb209ed87c8062bbdf6d6d0b39ed8e99b459b69157e032ede29b1806fb51"} Nov 24 13:53:27 crc kubenswrapper[5039]: I1124 13:53:27.200615 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c2r8x" event={"ID":"b2515f7f-0627-494c-a4ae-dbea100c89ee","Type":"ContainerDied","Data":"bdee8cebfd59d4fcfc79f3313d993ba63dc40ea01815681ed9941ea138b1c447"} Nov 24 13:53:27 crc kubenswrapper[5039]: I1124 13:53:27.200637 5039 scope.go:117] "RemoveContainer" containerID="8303eb209ed87c8062bbdf6d6d0b39ed8e99b459b69157e032ede29b1806fb51" Nov 24 13:53:27 crc kubenswrapper[5039]: I1124 13:53:27.200563 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c2r8x" Nov 24 13:53:27 crc kubenswrapper[5039]: I1124 13:53:27.282582 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-c2r8x"] Nov 24 13:53:27 crc kubenswrapper[5039]: I1124 13:53:27.282697 5039 scope.go:117] "RemoveContainer" containerID="bb1b18defc0f35e140c39ca0b4fbe3853bbf65ba1b1cbc1d40219ad6d1efaedc" Nov 24 13:53:27 crc kubenswrapper[5039]: I1124 13:53:27.296419 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-c2r8x"] Nov 24 13:53:27 crc kubenswrapper[5039]: I1124 13:53:27.361722 5039 scope.go:117] "RemoveContainer" containerID="1e68103d1fc7c6f7b28a2f887afab4da9446e8e5f7c2011569aa1d35fe6c3a68" Nov 24 13:53:27 crc kubenswrapper[5039]: I1124 13:53:27.426132 5039 scope.go:117] "RemoveContainer" containerID="8303eb209ed87c8062bbdf6d6d0b39ed8e99b459b69157e032ede29b1806fb51" Nov 24 13:53:27 crc kubenswrapper[5039]: E1124 13:53:27.427381 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8303eb209ed87c8062bbdf6d6d0b39ed8e99b459b69157e032ede29b1806fb51\": container with ID starting with 8303eb209ed87c8062bbdf6d6d0b39ed8e99b459b69157e032ede29b1806fb51 not found: ID does not exist" containerID="8303eb209ed87c8062bbdf6d6d0b39ed8e99b459b69157e032ede29b1806fb51" Nov 24 13:53:27 crc kubenswrapper[5039]: I1124 13:53:27.427412 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8303eb209ed87c8062bbdf6d6d0b39ed8e99b459b69157e032ede29b1806fb51"} err="failed to get container status \"8303eb209ed87c8062bbdf6d6d0b39ed8e99b459b69157e032ede29b1806fb51\": rpc error: code = NotFound desc = could not find container \"8303eb209ed87c8062bbdf6d6d0b39ed8e99b459b69157e032ede29b1806fb51\": container with ID starting with 8303eb209ed87c8062bbdf6d6d0b39ed8e99b459b69157e032ede29b1806fb51 not found: ID does not exist" Nov 24 13:53:27 crc kubenswrapper[5039]: I1124 13:53:27.427434 5039 scope.go:117] "RemoveContainer" containerID="bb1b18defc0f35e140c39ca0b4fbe3853bbf65ba1b1cbc1d40219ad6d1efaedc" Nov 24 13:53:27 crc kubenswrapper[5039]: E1124 13:53:27.427728 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb1b18defc0f35e140c39ca0b4fbe3853bbf65ba1b1cbc1d40219ad6d1efaedc\": container with ID starting with bb1b18defc0f35e140c39ca0b4fbe3853bbf65ba1b1cbc1d40219ad6d1efaedc not found: ID does not exist" containerID="bb1b18defc0f35e140c39ca0b4fbe3853bbf65ba1b1cbc1d40219ad6d1efaedc" Nov 24 13:53:27 crc kubenswrapper[5039]: I1124 13:53:27.427754 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb1b18defc0f35e140c39ca0b4fbe3853bbf65ba1b1cbc1d40219ad6d1efaedc"} err="failed to get container status \"bb1b18defc0f35e140c39ca0b4fbe3853bbf65ba1b1cbc1d40219ad6d1efaedc\": rpc error: code = NotFound desc = could not find container \"bb1b18defc0f35e140c39ca0b4fbe3853bbf65ba1b1cbc1d40219ad6d1efaedc\": container with ID starting with bb1b18defc0f35e140c39ca0b4fbe3853bbf65ba1b1cbc1d40219ad6d1efaedc not found: ID does not exist" Nov 24 13:53:27 crc kubenswrapper[5039]: I1124 13:53:27.427768 5039 scope.go:117] "RemoveContainer" containerID="1e68103d1fc7c6f7b28a2f887afab4da9446e8e5f7c2011569aa1d35fe6c3a68" Nov 24 13:53:27 crc kubenswrapper[5039]: E1124 13:53:27.428053 5039 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"1e68103d1fc7c6f7b28a2f887afab4da9446e8e5f7c2011569aa1d35fe6c3a68\": container with ID starting with 1e68103d1fc7c6f7b28a2f887afab4da9446e8e5f7c2011569aa1d35fe6c3a68 not found: ID does not exist" containerID="1e68103d1fc7c6f7b28a2f887afab4da9446e8e5f7c2011569aa1d35fe6c3a68" Nov 24 13:53:27 crc kubenswrapper[5039]: I1124 13:53:27.428081 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e68103d1fc7c6f7b28a2f887afab4da9446e8e5f7c2011569aa1d35fe6c3a68"} err="failed to get container status \"1e68103d1fc7c6f7b28a2f887afab4da9446e8e5f7c2011569aa1d35fe6c3a68\": rpc error: code = NotFound desc = could not find container \"1e68103d1fc7c6f7b28a2f887afab4da9446e8e5f7c2011569aa1d35fe6c3a68\": container with ID starting with 1e68103d1fc7c6f7b28a2f887afab4da9446e8e5f7c2011569aa1d35fe6c3a68 not found: ID does not exist" Nov 24 13:53:28 crc kubenswrapper[5039]: I1124 13:53:28.320986 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2515f7f-0627-494c-a4ae-dbea100c89ee" path="/var/lib/kubelet/pods/b2515f7f-0627-494c-a4ae-dbea100c89ee/volumes" Nov 24 13:53:28 crc kubenswrapper[5039]: I1124 13:53:28.597095 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pfrqs"] Nov 24 13:53:28 crc kubenswrapper[5039]: E1124 13:53:28.598135 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2515f7f-0627-494c-a4ae-dbea100c89ee" containerName="extract-content" Nov 24 13:53:28 crc kubenswrapper[5039]: I1124 13:53:28.598298 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2515f7f-0627-494c-a4ae-dbea100c89ee" containerName="extract-content" Nov 24 13:53:28 crc kubenswrapper[5039]: E1124 13:53:28.598441 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2515f7f-0627-494c-a4ae-dbea100c89ee" containerName="registry-server" Nov 24 13:53:28 crc kubenswrapper[5039]: I1124 13:53:28.598652 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2515f7f-0627-494c-a4ae-dbea100c89ee" containerName="registry-server" Nov 24 13:53:28 crc kubenswrapper[5039]: E1124 13:53:28.598812 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2515f7f-0627-494c-a4ae-dbea100c89ee" containerName="extract-utilities" Nov 24 13:53:28 crc kubenswrapper[5039]: I1124 13:53:28.598927 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2515f7f-0627-494c-a4ae-dbea100c89ee" containerName="extract-utilities" Nov 24 13:53:28 crc kubenswrapper[5039]: I1124 13:53:28.599471 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2515f7f-0627-494c-a4ae-dbea100c89ee" containerName="registry-server" Nov 24 13:53:28 crc kubenswrapper[5039]: I1124 13:53:28.603920 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pfrqs" Nov 24 13:53:28 crc kubenswrapper[5039]: I1124 13:53:28.623778 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pfrqs"] Nov 24 13:53:28 crc kubenswrapper[5039]: I1124 13:53:28.770162 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef5ae1f3-da86-43e5-95cc-cf72a858c5eb-utilities\") pod \"certified-operators-pfrqs\" (UID: \"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb\") " pod="openshift-marketplace/certified-operators-pfrqs" Nov 24 13:53:28 crc kubenswrapper[5039]: I1124 13:53:28.770357 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef5ae1f3-da86-43e5-95cc-cf72a858c5eb-catalog-content\") pod \"certified-operators-pfrqs\" (UID: \"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb\") " pod="openshift-marketplace/certified-operators-pfrqs" Nov 24 13:53:28 crc kubenswrapper[5039]: I1124 13:53:28.770389 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djzgp\" (UniqueName: \"kubernetes.io/projected/ef5ae1f3-da86-43e5-95cc-cf72a858c5eb-kube-api-access-djzgp\") pod \"certified-operators-pfrqs\" (UID: \"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb\") " pod="openshift-marketplace/certified-operators-pfrqs" Nov 24 13:53:28 crc kubenswrapper[5039]: I1124 13:53:28.872901 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef5ae1f3-da86-43e5-95cc-cf72a858c5eb-catalog-content\") pod \"certified-operators-pfrqs\" (UID: \"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb\") " pod="openshift-marketplace/certified-operators-pfrqs" Nov 24 13:53:28 crc kubenswrapper[5039]: I1124 13:53:28.873257 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djzgp\" (UniqueName: \"kubernetes.io/projected/ef5ae1f3-da86-43e5-95cc-cf72a858c5eb-kube-api-access-djzgp\") pod \"certified-operators-pfrqs\" (UID: \"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb\") " pod="openshift-marketplace/certified-operators-pfrqs" Nov 24 13:53:28 crc kubenswrapper[5039]: I1124 13:53:28.873454 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef5ae1f3-da86-43e5-95cc-cf72a858c5eb-utilities\") pod \"certified-operators-pfrqs\" (UID: \"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb\") " pod="openshift-marketplace/certified-operators-pfrqs" Nov 24 13:53:28 crc kubenswrapper[5039]: I1124 13:53:28.873488 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef5ae1f3-da86-43e5-95cc-cf72a858c5eb-catalog-content\") pod \"certified-operators-pfrqs\" (UID: \"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb\") " pod="openshift-marketplace/certified-operators-pfrqs" Nov 24 13:53:28 crc kubenswrapper[5039]: I1124 13:53:28.873758 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef5ae1f3-da86-43e5-95cc-cf72a858c5eb-utilities\") pod \"certified-operators-pfrqs\" (UID: \"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb\") " pod="openshift-marketplace/certified-operators-pfrqs" Nov 24 13:53:28 crc kubenswrapper[5039]: I1124 13:53:28.896632 5039 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-djzgp\" (UniqueName: \"kubernetes.io/projected/ef5ae1f3-da86-43e5-95cc-cf72a858c5eb-kube-api-access-djzgp\") pod \"certified-operators-pfrqs\" (UID: \"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb\") " pod="openshift-marketplace/certified-operators-pfrqs" Nov 24 13:53:28 crc kubenswrapper[5039]: I1124 13:53:28.943443 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pfrqs" Nov 24 13:53:29 crc kubenswrapper[5039]: I1124 13:53:29.450142 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pfrqs"] Nov 24 13:53:29 crc kubenswrapper[5039]: E1124 13:53:29.915873 5039 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef5ae1f3_da86_43e5_95cc_cf72a858c5eb.slice/crio-c65b311b2005a7ce58e7249edd02f70ab5b1b0054756bef5fd1add0ab22f45ac.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef5ae1f3_da86_43e5_95cc_cf72a858c5eb.slice/crio-conmon-c65b311b2005a7ce58e7249edd02f70ab5b1b0054756bef5fd1add0ab22f45ac.scope\": RecentStats: unable to find data in memory cache]" Nov 24 13:53:30 crc kubenswrapper[5039]: I1124 13:53:30.236978 5039 generic.go:334] "Generic (PLEG): container finished" podID="ef5ae1f3-da86-43e5-95cc-cf72a858c5eb" containerID="c65b311b2005a7ce58e7249edd02f70ab5b1b0054756bef5fd1add0ab22f45ac" exitCode=0 Nov 24 13:53:30 crc kubenswrapper[5039]: I1124 13:53:30.237067 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfrqs" event={"ID":"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb","Type":"ContainerDied","Data":"c65b311b2005a7ce58e7249edd02f70ab5b1b0054756bef5fd1add0ab22f45ac"} Nov 24 13:53:30 crc kubenswrapper[5039]: I1124 13:53:30.237387 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfrqs" event={"ID":"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb","Type":"ContainerStarted","Data":"f647d0f4b55702eaa91207ad4092949748db321c3c9eb14ae8ffc767790754fc"} Nov 24 13:53:32 crc kubenswrapper[5039]: I1124 13:53:32.262141 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfrqs" event={"ID":"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb","Type":"ContainerStarted","Data":"d960d7beb23941cee8c2ca53dfd2190f78e9a931efd2ce8e2c21c241e7399b85"} Nov 24 13:53:33 crc kubenswrapper[5039]: I1124 13:53:33.294702 5039 generic.go:334] "Generic (PLEG): container finished" podID="ef5ae1f3-da86-43e5-95cc-cf72a858c5eb" containerID="d960d7beb23941cee8c2ca53dfd2190f78e9a931efd2ce8e2c21c241e7399b85" exitCode=0 Nov 24 13:53:33 crc kubenswrapper[5039]: I1124 13:53:33.294812 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfrqs" event={"ID":"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb","Type":"ContainerDied","Data":"d960d7beb23941cee8c2ca53dfd2190f78e9a931efd2ce8e2c21c241e7399b85"} Nov 24 13:53:34 crc kubenswrapper[5039]: I1124 13:53:34.319960 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfrqs" event={"ID":"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb","Type":"ContainerStarted","Data":"96afb61f54cc70a4e65c361b3a40b1327b76bc493b6090e55d5a0a0b958e20da"} Nov 24 13:53:34 crc kubenswrapper[5039]: I1124 13:53:34.337751 5039 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pfrqs" podStartSLOduration=2.851287166 podStartE2EDuration="6.337729337s" podCreationTimestamp="2025-11-24 13:53:28 +0000 UTC" firstStartedPulling="2025-11-24 13:53:30.239268664 +0000 UTC m=+2122.678393174" lastFinishedPulling="2025-11-24 13:53:33.725710825 +0000 UTC m=+2126.164835345" observedRunningTime="2025-11-24 13:53:34.330628294 +0000 UTC m=+2126.769752824" watchObservedRunningTime="2025-11-24 13:53:34.337729337 +0000 UTC m=+2126.776853847" Nov 24 13:53:36 crc kubenswrapper[5039]: I1124 13:53:36.053285 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-4vbpl"] Nov 24 13:53:36 crc kubenswrapper[5039]: I1124 13:53:36.070547 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-4vbpl"] Nov 24 13:53:36 crc kubenswrapper[5039]: I1124 13:53:36.335652 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a528dd72-29d9-43c4-8541-f6e416144724" path="/var/lib/kubelet/pods/a528dd72-29d9-43c4-8541-f6e416144724/volumes" Nov 24 13:53:38 crc kubenswrapper[5039]: I1124 13:53:38.944667 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-pfrqs" Nov 24 13:53:38 crc kubenswrapper[5039]: I1124 13:53:38.945207 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-pfrqs" Nov 24 13:53:39 crc kubenswrapper[5039]: I1124 13:53:39.018891 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-pfrqs" Nov 24 13:53:39 crc kubenswrapper[5039]: I1124 13:53:39.422702 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-pfrqs" Nov 24 13:53:40 crc kubenswrapper[5039]: I1124 13:53:40.905928 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pfrqs"] Nov 24 13:53:41 crc kubenswrapper[5039]: I1124 13:53:41.396371 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-pfrqs" podUID="ef5ae1f3-da86-43e5-95cc-cf72a858c5eb" containerName="registry-server" containerID="cri-o://96afb61f54cc70a4e65c361b3a40b1327b76bc493b6090e55d5a0a0b958e20da" gracePeriod=2 Nov 24 13:53:41 crc kubenswrapper[5039]: I1124 13:53:41.980550 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pfrqs" Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.076297 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef5ae1f3-da86-43e5-95cc-cf72a858c5eb-utilities\") pod \"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb\" (UID: \"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb\") " Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.076447 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef5ae1f3-da86-43e5-95cc-cf72a858c5eb-catalog-content\") pod \"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb\" (UID: \"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb\") " Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.076528 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djzgp\" (UniqueName: \"kubernetes.io/projected/ef5ae1f3-da86-43e5-95cc-cf72a858c5eb-kube-api-access-djzgp\") pod \"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb\" (UID: \"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb\") " Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.077346 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef5ae1f3-da86-43e5-95cc-cf72a858c5eb-utilities" (OuterVolumeSpecName: "utilities") pod "ef5ae1f3-da86-43e5-95cc-cf72a858c5eb" (UID: "ef5ae1f3-da86-43e5-95cc-cf72a858c5eb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.082735 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef5ae1f3-da86-43e5-95cc-cf72a858c5eb-kube-api-access-djzgp" (OuterVolumeSpecName: "kube-api-access-djzgp") pod "ef5ae1f3-da86-43e5-95cc-cf72a858c5eb" (UID: "ef5ae1f3-da86-43e5-95cc-cf72a858c5eb"). InnerVolumeSpecName "kube-api-access-djzgp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.130989 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef5ae1f3-da86-43e5-95cc-cf72a858c5eb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ef5ae1f3-da86-43e5-95cc-cf72a858c5eb" (UID: "ef5ae1f3-da86-43e5-95cc-cf72a858c5eb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.179436 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef5ae1f3-da86-43e5-95cc-cf72a858c5eb-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.179689 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef5ae1f3-da86-43e5-95cc-cf72a858c5eb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.179793 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djzgp\" (UniqueName: \"kubernetes.io/projected/ef5ae1f3-da86-43e5-95cc-cf72a858c5eb-kube-api-access-djzgp\") on node \"crc\" DevicePath \"\"" Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.412184 5039 generic.go:334] "Generic (PLEG): container finished" podID="ef5ae1f3-da86-43e5-95cc-cf72a858c5eb" containerID="96afb61f54cc70a4e65c361b3a40b1327b76bc493b6090e55d5a0a0b958e20da" exitCode=0 Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.412229 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pfrqs" Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.412233 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfrqs" event={"ID":"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb","Type":"ContainerDied","Data":"96afb61f54cc70a4e65c361b3a40b1327b76bc493b6090e55d5a0a0b958e20da"} Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.412344 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfrqs" event={"ID":"ef5ae1f3-da86-43e5-95cc-cf72a858c5eb","Type":"ContainerDied","Data":"f647d0f4b55702eaa91207ad4092949748db321c3c9eb14ae8ffc767790754fc"} Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.412368 5039 scope.go:117] "RemoveContainer" containerID="96afb61f54cc70a4e65c361b3a40b1327b76bc493b6090e55d5a0a0b958e20da" Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.440675 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pfrqs"] Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.440919 5039 scope.go:117] "RemoveContainer" containerID="d960d7beb23941cee8c2ca53dfd2190f78e9a931efd2ce8e2c21c241e7399b85" Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.453564 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-pfrqs"] Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.473828 5039 scope.go:117] "RemoveContainer" containerID="c65b311b2005a7ce58e7249edd02f70ab5b1b0054756bef5fd1add0ab22f45ac" Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.531473 5039 scope.go:117] "RemoveContainer" containerID="96afb61f54cc70a4e65c361b3a40b1327b76bc493b6090e55d5a0a0b958e20da" Nov 24 13:53:42 crc kubenswrapper[5039]: E1124 13:53:42.531988 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96afb61f54cc70a4e65c361b3a40b1327b76bc493b6090e55d5a0a0b958e20da\": container with ID starting with 96afb61f54cc70a4e65c361b3a40b1327b76bc493b6090e55d5a0a0b958e20da not found: ID does not exist" containerID="96afb61f54cc70a4e65c361b3a40b1327b76bc493b6090e55d5a0a0b958e20da" Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.532032 
5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96afb61f54cc70a4e65c361b3a40b1327b76bc493b6090e55d5a0a0b958e20da"} err="failed to get container status \"96afb61f54cc70a4e65c361b3a40b1327b76bc493b6090e55d5a0a0b958e20da\": rpc error: code = NotFound desc = could not find container \"96afb61f54cc70a4e65c361b3a40b1327b76bc493b6090e55d5a0a0b958e20da\": container with ID starting with 96afb61f54cc70a4e65c361b3a40b1327b76bc493b6090e55d5a0a0b958e20da not found: ID does not exist" Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.532058 5039 scope.go:117] "RemoveContainer" containerID="d960d7beb23941cee8c2ca53dfd2190f78e9a931efd2ce8e2c21c241e7399b85" Nov 24 13:53:42 crc kubenswrapper[5039]: E1124 13:53:42.532368 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d960d7beb23941cee8c2ca53dfd2190f78e9a931efd2ce8e2c21c241e7399b85\": container with ID starting with d960d7beb23941cee8c2ca53dfd2190f78e9a931efd2ce8e2c21c241e7399b85 not found: ID does not exist" containerID="d960d7beb23941cee8c2ca53dfd2190f78e9a931efd2ce8e2c21c241e7399b85" Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.532401 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d960d7beb23941cee8c2ca53dfd2190f78e9a931efd2ce8e2c21c241e7399b85"} err="failed to get container status \"d960d7beb23941cee8c2ca53dfd2190f78e9a931efd2ce8e2c21c241e7399b85\": rpc error: code = NotFound desc = could not find container \"d960d7beb23941cee8c2ca53dfd2190f78e9a931efd2ce8e2c21c241e7399b85\": container with ID starting with d960d7beb23941cee8c2ca53dfd2190f78e9a931efd2ce8e2c21c241e7399b85 not found: ID does not exist" Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.532421 5039 scope.go:117] "RemoveContainer" containerID="c65b311b2005a7ce58e7249edd02f70ab5b1b0054756bef5fd1add0ab22f45ac" Nov 24 13:53:42 crc kubenswrapper[5039]: E1124 13:53:42.532733 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c65b311b2005a7ce58e7249edd02f70ab5b1b0054756bef5fd1add0ab22f45ac\": container with ID starting with c65b311b2005a7ce58e7249edd02f70ab5b1b0054756bef5fd1add0ab22f45ac not found: ID does not exist" containerID="c65b311b2005a7ce58e7249edd02f70ab5b1b0054756bef5fd1add0ab22f45ac" Nov 24 13:53:42 crc kubenswrapper[5039]: I1124 13:53:42.532772 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c65b311b2005a7ce58e7249edd02f70ab5b1b0054756bef5fd1add0ab22f45ac"} err="failed to get container status \"c65b311b2005a7ce58e7249edd02f70ab5b1b0054756bef5fd1add0ab22f45ac\": rpc error: code = NotFound desc = could not find container \"c65b311b2005a7ce58e7249edd02f70ab5b1b0054756bef5fd1add0ab22f45ac\": container with ID starting with c65b311b2005a7ce58e7249edd02f70ab5b1b0054756bef5fd1add0ab22f45ac not found: ID does not exist" Nov 24 13:53:44 crc kubenswrapper[5039]: I1124 13:53:44.327677 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef5ae1f3-da86-43e5-95cc-cf72a858c5eb" path="/var/lib/kubelet/pods/ef5ae1f3-da86-43e5-95cc-cf72a858c5eb/volumes" Nov 24 13:53:49 crc kubenswrapper[5039]: I1124 13:53:49.785876 5039 scope.go:117] "RemoveContainer" containerID="deda2b7d214afbbb2221e74c6adc18fdcbbdb6361daacd63b80cc5d1e2b2617b" Nov 24 13:53:49 crc kubenswrapper[5039]: I1124 13:53:49.846310 5039 scope.go:117] "RemoveContainer" 
containerID="b4d85e4d184e4998db46db0b1205a979b16c3d967c5396ba0647aaf389585711" Nov 24 13:53:49 crc kubenswrapper[5039]: I1124 13:53:49.896984 5039 scope.go:117] "RemoveContainer" containerID="528e41cdbf10f445277ad9bbd7e0cee1cb39b9e0b270368cb046d67b713f70e2" Nov 24 13:53:50 crc kubenswrapper[5039]: I1124 13:53:50.101664 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:53:50 crc kubenswrapper[5039]: I1124 13:53:50.101720 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:54:13 crc kubenswrapper[5039]: I1124 13:54:13.765571 5039 generic.go:334] "Generic (PLEG): container finished" podID="4e8bb9f6-b0a6-4237-88cc-f99bd22f4784" containerID="3a38d5d38903093c1f9fb37a359cb88145b981cc1729e11b4c7aedfb73c61f5a" exitCode=0 Nov 24 13:54:13 crc kubenswrapper[5039]: I1124 13:54:13.765656 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" event={"ID":"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784","Type":"ContainerDied","Data":"3a38d5d38903093c1f9fb37a359cb88145b981cc1729e11b4c7aedfb73c61f5a"} Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.287332 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.458621 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-ssh-key\") pod \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\" (UID: \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\") " Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.458682 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-ovncontroller-config-0\") pod \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\" (UID: \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\") " Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.458736 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-ovn-combined-ca-bundle\") pod \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\" (UID: \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\") " Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.458774 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pmqsx\" (UniqueName: \"kubernetes.io/projected/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-kube-api-access-pmqsx\") pod \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\" (UID: \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\") " Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.458794 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-inventory\") pod \"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\" (UID: 
\"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784\") " Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.467573 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "4e8bb9f6-b0a6-4237-88cc-f99bd22f4784" (UID: "4e8bb9f6-b0a6-4237-88cc-f99bd22f4784"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.467769 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-kube-api-access-pmqsx" (OuterVolumeSpecName: "kube-api-access-pmqsx") pod "4e8bb9f6-b0a6-4237-88cc-f99bd22f4784" (UID: "4e8bb9f6-b0a6-4237-88cc-f99bd22f4784"). InnerVolumeSpecName "kube-api-access-pmqsx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.487900 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "4e8bb9f6-b0a6-4237-88cc-f99bd22f4784" (UID: "4e8bb9f6-b0a6-4237-88cc-f99bd22f4784"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.491106 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4e8bb9f6-b0a6-4237-88cc-f99bd22f4784" (UID: "4e8bb9f6-b0a6-4237-88cc-f99bd22f4784"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.499556 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-inventory" (OuterVolumeSpecName: "inventory") pod "4e8bb9f6-b0a6-4237-88cc-f99bd22f4784" (UID: "4e8bb9f6-b0a6-4237-88cc-f99bd22f4784"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.561061 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.561096 5039 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.561107 5039 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.561115 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pmqsx\" (UniqueName: \"kubernetes.io/projected/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-kube-api-access-pmqsx\") on node \"crc\" DevicePath \"\"" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.561123 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.788431 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" event={"ID":"4e8bb9f6-b0a6-4237-88cc-f99bd22f4784","Type":"ContainerDied","Data":"faf392c0d57d1baef3742a7b29c53b753fb686c055164654823c68084929318f"} Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.788465 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="faf392c0d57d1baef3742a7b29c53b753fb686c055164654823c68084929318f" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.788497 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.868373 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb"] Nov 24 13:54:15 crc kubenswrapper[5039]: E1124 13:54:15.868876 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef5ae1f3-da86-43e5-95cc-cf72a858c5eb" containerName="extract-utilities" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.868895 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef5ae1f3-da86-43e5-95cc-cf72a858c5eb" containerName="extract-utilities" Nov 24 13:54:15 crc kubenswrapper[5039]: E1124 13:54:15.868917 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef5ae1f3-da86-43e5-95cc-cf72a858c5eb" containerName="extract-content" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.868923 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef5ae1f3-da86-43e5-95cc-cf72a858c5eb" containerName="extract-content" Nov 24 13:54:15 crc kubenswrapper[5039]: E1124 13:54:15.868935 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e8bb9f6-b0a6-4237-88cc-f99bd22f4784" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.868941 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e8bb9f6-b0a6-4237-88cc-f99bd22f4784" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 24 13:54:15 crc kubenswrapper[5039]: E1124 13:54:15.868967 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef5ae1f3-da86-43e5-95cc-cf72a858c5eb" containerName="registry-server" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.868973 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef5ae1f3-da86-43e5-95cc-cf72a858c5eb" containerName="registry-server" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.869192 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e8bb9f6-b0a6-4237-88cc-f99bd22f4784" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.869216 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef5ae1f3-da86-43e5-95cc-cf72a858c5eb" containerName="registry-server" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.870096 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.874233 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.874311 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.874415 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.874760 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.874879 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 13:54:15 crc kubenswrapper[5039]: I1124 13:54:15.889649 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb"] Nov 24 13:54:16 crc kubenswrapper[5039]: I1124 13:54:16.071005 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb\" (UID: \"84ee730b-5dc4-4bbb-b817-4f65942865b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" Nov 24 13:54:16 crc kubenswrapper[5039]: I1124 13:54:16.071292 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb\" (UID: \"84ee730b-5dc4-4bbb-b817-4f65942865b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" Nov 24 13:54:16 crc kubenswrapper[5039]: I1124 13:54:16.071344 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb\" (UID: \"84ee730b-5dc4-4bbb-b817-4f65942865b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" Nov 24 13:54:16 crc kubenswrapper[5039]: I1124 13:54:16.071396 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb\" (UID: \"84ee730b-5dc4-4bbb-b817-4f65942865b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" Nov 24 13:54:16 crc kubenswrapper[5039]: I1124 13:54:16.071573 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkvbk\" (UniqueName: \"kubernetes.io/projected/84ee730b-5dc4-4bbb-b817-4f65942865b6-kube-api-access-xkvbk\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb\" (UID: \"84ee730b-5dc4-4bbb-b817-4f65942865b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" Nov 24 13:54:16 crc kubenswrapper[5039]: I1124 13:54:16.173392 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb\" (UID: \"84ee730b-5dc4-4bbb-b817-4f65942865b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" Nov 24 13:54:16 crc kubenswrapper[5039]: I1124 13:54:16.173741 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb\" (UID: \"84ee730b-5dc4-4bbb-b817-4f65942865b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" Nov 24 13:54:16 crc kubenswrapper[5039]: I1124 13:54:16.173835 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb\" (UID: \"84ee730b-5dc4-4bbb-b817-4f65942865b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" Nov 24 13:54:16 crc kubenswrapper[5039]: I1124 13:54:16.173921 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb\" (UID: \"84ee730b-5dc4-4bbb-b817-4f65942865b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" Nov 24 13:54:16 crc kubenswrapper[5039]: I1124 13:54:16.174042 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkvbk\" (UniqueName: \"kubernetes.io/projected/84ee730b-5dc4-4bbb-b817-4f65942865b6-kube-api-access-xkvbk\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb\" (UID: \"84ee730b-5dc4-4bbb-b817-4f65942865b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" Nov 24 13:54:16 crc kubenswrapper[5039]: I1124 13:54:16.176959 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb\" (UID: \"84ee730b-5dc4-4bbb-b817-4f65942865b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" Nov 24 13:54:16 crc kubenswrapper[5039]: I1124 13:54:16.177089 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb\" (UID: \"84ee730b-5dc4-4bbb-b817-4f65942865b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" Nov 24 13:54:16 crc kubenswrapper[5039]: I1124 13:54:16.177910 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb\" (UID: \"84ee730b-5dc4-4bbb-b817-4f65942865b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" Nov 24 13:54:16 crc kubenswrapper[5039]: I1124 13:54:16.178550 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-ssh-key\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb\" (UID: \"84ee730b-5dc4-4bbb-b817-4f65942865b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" Nov 24 13:54:16 crc kubenswrapper[5039]: I1124 13:54:16.190542 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkvbk\" (UniqueName: \"kubernetes.io/projected/84ee730b-5dc4-4bbb-b817-4f65942865b6-kube-api-access-xkvbk\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb\" (UID: \"84ee730b-5dc4-4bbb-b817-4f65942865b6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" Nov 24 13:54:16 crc kubenswrapper[5039]: I1124 13:54:16.191137 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" Nov 24 13:54:16 crc kubenswrapper[5039]: I1124 13:54:16.781661 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb"] Nov 24 13:54:16 crc kubenswrapper[5039]: I1124 13:54:16.790804 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 13:54:16 crc kubenswrapper[5039]: I1124 13:54:16.807709 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" event={"ID":"84ee730b-5dc4-4bbb-b817-4f65942865b6","Type":"ContainerStarted","Data":"170dc1e3e791e0b0aeb673fd7fa783de21700728c1dae72a5b994b4a28c6e895"} Nov 24 13:54:17 crc kubenswrapper[5039]: I1124 13:54:17.820458 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" event={"ID":"84ee730b-5dc4-4bbb-b817-4f65942865b6","Type":"ContainerStarted","Data":"fc5c312e28172a9c643ab8e61d40fa5cdac4a6a87edb94215824402e78dcf1f7"} Nov 24 13:54:17 crc kubenswrapper[5039]: I1124 13:54:17.849443 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" podStartSLOduration=2.376236542 podStartE2EDuration="2.849420946s" podCreationTimestamp="2025-11-24 13:54:15 +0000 UTC" firstStartedPulling="2025-11-24 13:54:16.790537294 +0000 UTC m=+2169.229661804" lastFinishedPulling="2025-11-24 13:54:17.263721708 +0000 UTC m=+2169.702846208" observedRunningTime="2025-11-24 13:54:17.836756356 +0000 UTC m=+2170.275880906" watchObservedRunningTime="2025-11-24 13:54:17.849420946 +0000 UTC m=+2170.288545436" Nov 24 13:54:20 crc kubenswrapper[5039]: I1124 13:54:20.101757 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 13:54:20 crc kubenswrapper[5039]: I1124 13:54:20.102054 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:54:50 crc kubenswrapper[5039]: I1124 13:54:50.101319 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" start-of-body= Nov 24 13:54:50 crc kubenswrapper[5039]: I1124 13:54:50.102051 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 13:54:50 crc kubenswrapper[5039]: I1124 13:54:50.102105 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 13:54:50 crc kubenswrapper[5039]: I1124 13:54:50.103135 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 13:54:50 crc kubenswrapper[5039]: I1124 13:54:50.103219 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451" gracePeriod=600 Nov 24 13:54:50 crc kubenswrapper[5039]: E1124 13:54:50.244857 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:54:51 crc kubenswrapper[5039]: I1124 13:54:51.152320 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451" exitCode=0 Nov 24 13:54:51 crc kubenswrapper[5039]: I1124 13:54:51.152408 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"} Nov 24 13:54:51 crc kubenswrapper[5039]: I1124 13:54:51.152598 5039 scope.go:117] "RemoveContainer" containerID="43f9e17fdc829b04a1d158fb340e5b63c9b87b25d3decfdb862bbf4e2559df49" Nov 24 13:54:51 crc kubenswrapper[5039]: I1124 13:54:51.153234 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451" Nov 24 13:54:51 crc kubenswrapper[5039]: E1124 13:54:51.153474 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:55:03 crc kubenswrapper[5039]: I1124 13:55:03.308117 5039 scope.go:117] "RemoveContainer" 
containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451" Nov 24 13:55:03 crc kubenswrapper[5039]: E1124 13:55:03.309475 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:55:06 crc kubenswrapper[5039]: I1124 13:55:06.055520 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-bgfrt"] Nov 24 13:55:06 crc kubenswrapper[5039]: I1124 13:55:06.069002 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-bgfrt"] Nov 24 13:55:06 crc kubenswrapper[5039]: I1124 13:55:06.324236 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0be1e28f-a5d0-4685-b76f-5e074a81fe93" path="/var/lib/kubelet/pods/0be1e28f-a5d0-4685-b76f-5e074a81fe93/volumes" Nov 24 13:55:17 crc kubenswrapper[5039]: I1124 13:55:17.307374 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451" Nov 24 13:55:17 crc kubenswrapper[5039]: E1124 13:55:17.308351 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:55:29 crc kubenswrapper[5039]: I1124 13:55:29.307579 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451" Nov 24 13:55:29 crc kubenswrapper[5039]: E1124 13:55:29.308798 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:55:43 crc kubenswrapper[5039]: I1124 13:55:43.307047 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451" Nov 24 13:55:43 crc kubenswrapper[5039]: E1124 13:55:43.308059 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 13:55:48 crc kubenswrapper[5039]: I1124 13:55:48.057822 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-g5jcx"] Nov 24 13:55:48 crc kubenswrapper[5039]: I1124 13:55:48.069056 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-g5jcx"] Nov 24 13:55:48 crc kubenswrapper[5039]: I1124 13:55:48.321875 5039 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="831424c5-4dbf-4e75-871a-be5c0c7d64a2" path="/var/lib/kubelet/pods/831424c5-4dbf-4e75-871a-be5c0c7d64a2/volumes"
Nov 24 13:55:50 crc kubenswrapper[5039]: I1124 13:55:50.058703 5039 scope.go:117] "RemoveContainer" containerID="4bc284743209699ddfbf620b48a7ffbb657c3c40f2a03c998f1c814e65c4e3eb"
Nov 24 13:55:50 crc kubenswrapper[5039]: I1124 13:55:50.100864 5039 scope.go:117] "RemoveContainer" containerID="ae678a50eb009e9a11df09f527744b2e15e03d51b5d029f49a5f53f97755fe1a"
Nov 24 13:55:58 crc kubenswrapper[5039]: I1124 13:55:58.318761 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"
Nov 24 13:55:58 crc kubenswrapper[5039]: E1124 13:55:58.319736 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 13:56:13 crc kubenswrapper[5039]: I1124 13:56:13.307083 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"
Nov 24 13:56:13 crc kubenswrapper[5039]: E1124 13:56:13.308000 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 13:56:28 crc kubenswrapper[5039]: I1124 13:56:28.316319 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"
Nov 24 13:56:28 crc kubenswrapper[5039]: E1124 13:56:28.317177 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 13:56:40 crc kubenswrapper[5039]: I1124 13:56:40.306609 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"
Nov 24 13:56:40 crc kubenswrapper[5039]: E1124 13:56:40.307307 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 13:56:52 crc kubenswrapper[5039]: I1124 13:56:52.306554 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"
Nov 24 13:56:52 crc kubenswrapper[5039]: E1124 13:56:52.307313 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 13:57:03 crc kubenswrapper[5039]: I1124 13:57:03.307570 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"
Nov 24 13:57:03 crc kubenswrapper[5039]: E1124 13:57:03.308433 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 13:57:14 crc kubenswrapper[5039]: I1124 13:57:14.307706 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"
Nov 24 13:57:14 crc kubenswrapper[5039]: E1124 13:57:14.310135 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 13:57:26 crc kubenswrapper[5039]: I1124 13:57:26.307965 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"
Nov 24 13:57:26 crc kubenswrapper[5039]: E1124 13:57:26.308881 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 13:57:39 crc kubenswrapper[5039]: I1124 13:57:39.306907 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"
Nov 24 13:57:39 crc kubenswrapper[5039]: E1124 13:57:39.307857 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 13:57:54 crc kubenswrapper[5039]: I1124 13:57:54.306723 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"
Nov 24 13:57:54 crc kubenswrapper[5039]: E1124 13:57:54.307710 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 13:58:07 crc kubenswrapper[5039]: I1124 13:58:07.307821 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"
Nov 24 13:58:07 crc kubenswrapper[5039]: E1124 13:58:07.309020 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 13:58:13 crc kubenswrapper[5039]: I1124 13:58:13.056525 5039 generic.go:334] "Generic (PLEG): container finished" podID="84ee730b-5dc4-4bbb-b817-4f65942865b6" containerID="fc5c312e28172a9c643ab8e61d40fa5cdac4a6a87edb94215824402e78dcf1f7" exitCode=0
Nov 24 13:58:13 crc kubenswrapper[5039]: I1124 13:58:13.056650 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" event={"ID":"84ee730b-5dc4-4bbb-b817-4f65942865b6","Type":"ContainerDied","Data":"fc5c312e28172a9c643ab8e61d40fa5cdac4a6a87edb94215824402e78dcf1f7"}
Nov 24 13:58:14 crc kubenswrapper[5039]: I1124 13:58:14.617251 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb"
Nov 24 13:58:14 crc kubenswrapper[5039]: I1124 13:58:14.691761 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-libvirt-combined-ca-bundle\") pod \"84ee730b-5dc4-4bbb-b817-4f65942865b6\" (UID: \"84ee730b-5dc4-4bbb-b817-4f65942865b6\") "
Nov 24 13:58:14 crc kubenswrapper[5039]: I1124 13:58:14.691926 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-libvirt-secret-0\") pod \"84ee730b-5dc4-4bbb-b817-4f65942865b6\" (UID: \"84ee730b-5dc4-4bbb-b817-4f65942865b6\") "
Nov 24 13:58:14 crc kubenswrapper[5039]: I1124 13:58:14.692053 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xkvbk\" (UniqueName: \"kubernetes.io/projected/84ee730b-5dc4-4bbb-b817-4f65942865b6-kube-api-access-xkvbk\") pod \"84ee730b-5dc4-4bbb-b817-4f65942865b6\" (UID: \"84ee730b-5dc4-4bbb-b817-4f65942865b6\") "
Nov 24 13:58:14 crc kubenswrapper[5039]: I1124 13:58:14.692100 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-inventory\") pod \"84ee730b-5dc4-4bbb-b817-4f65942865b6\" (UID: \"84ee730b-5dc4-4bbb-b817-4f65942865b6\") "
Nov 24 13:58:14 crc kubenswrapper[5039]: I1124 13:58:14.692193 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-ssh-key\") pod \"84ee730b-5dc4-4bbb-b817-4f65942865b6\" (UID: \"84ee730b-5dc4-4bbb-b817-4f65942865b6\") "
Nov 24 13:58:14 crc kubenswrapper[5039]: I1124 13:58:14.697141 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84ee730b-5dc4-4bbb-b817-4f65942865b6-kube-api-access-xkvbk" (OuterVolumeSpecName: "kube-api-access-xkvbk") pod "84ee730b-5dc4-4bbb-b817-4f65942865b6" (UID: "84ee730b-5dc4-4bbb-b817-4f65942865b6"). InnerVolumeSpecName "kube-api-access-xkvbk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:58:14 crc kubenswrapper[5039]: I1124 13:58:14.697743 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "84ee730b-5dc4-4bbb-b817-4f65942865b6" (UID: "84ee730b-5dc4-4bbb-b817-4f65942865b6"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:58:14 crc kubenswrapper[5039]: I1124 13:58:14.721463 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-inventory" (OuterVolumeSpecName: "inventory") pod "84ee730b-5dc4-4bbb-b817-4f65942865b6" (UID: "84ee730b-5dc4-4bbb-b817-4f65942865b6"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:58:14 crc kubenswrapper[5039]: I1124 13:58:14.722131 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "84ee730b-5dc4-4bbb-b817-4f65942865b6" (UID: "84ee730b-5dc4-4bbb-b817-4f65942865b6"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:58:14 crc kubenswrapper[5039]: I1124 13:58:14.729940 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "84ee730b-5dc4-4bbb-b817-4f65942865b6" (UID: "84ee730b-5dc4-4bbb-b817-4f65942865b6"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 13:58:14 crc kubenswrapper[5039]: I1124 13:58:14.794639 5039 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 13:58:14 crc kubenswrapper[5039]: I1124 13:58:14.794692 5039 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-libvirt-secret-0\") on node \"crc\" DevicePath \"\""
Nov 24 13:58:14 crc kubenswrapper[5039]: I1124 13:58:14.794703 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xkvbk\" (UniqueName: \"kubernetes.io/projected/84ee730b-5dc4-4bbb-b817-4f65942865b6-kube-api-access-xkvbk\") on node \"crc\" DevicePath \"\""
Nov 24 13:58:14 crc kubenswrapper[5039]: I1124 13:58:14.794714 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-inventory\") on node \"crc\" DevicePath \"\""
Nov 24 13:58:14 crc kubenswrapper[5039]: I1124 13:58:14.794722 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84ee730b-5dc4-4bbb-b817-4f65942865b6-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.081164 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb" event={"ID":"84ee730b-5dc4-4bbb-b817-4f65942865b6","Type":"ContainerDied","Data":"170dc1e3e791e0b0aeb673fd7fa783de21700728c1dae72a5b994b4a28c6e895"}
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.081217 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="170dc1e3e791e0b0aeb673fd7fa783de21700728c1dae72a5b994b4a28c6e895"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.081238 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.196755 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"]
Nov 24 13:58:15 crc kubenswrapper[5039]: E1124 13:58:15.197525 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84ee730b-5dc4-4bbb-b817-4f65942865b6" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.197545 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="84ee730b-5dc4-4bbb-b817-4f65942865b6" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.197775 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="84ee730b-5dc4-4bbb-b817-4f65942865b6" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.198557 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.207794 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.208022 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.208875 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.209168 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.209476 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.222333 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"]
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.316852 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.316965 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.317046 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.317123 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgqcv\" (UniqueName: \"kubernetes.io/projected/be684f03-c6b3-4538-a113-1c4a1873dc96-kube-api-access-cgqcv\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.317173 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.317207 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.317342 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.419196 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.419299 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.419374 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgqcv\" (UniqueName: \"kubernetes.io/projected/be684f03-c6b3-4538-a113-1c4a1873dc96-kube-api-access-cgqcv\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.419408 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.419431 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.419600 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.419682 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.423155 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.424115 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.424434 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.425275 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.425639 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.438070 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.442205 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgqcv\" (UniqueName: \"kubernetes.io/projected/be684f03-c6b3-4538-a113-1c4a1873dc96-kube-api-access-cgqcv\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-xl22h\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:15 crc kubenswrapper[5039]: I1124 13:58:15.524736 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 13:58:16 crc kubenswrapper[5039]: I1124 13:58:16.084134 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"]
Nov 24 13:58:16 crc kubenswrapper[5039]: I1124 13:58:16.096067 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h" event={"ID":"be684f03-c6b3-4538-a113-1c4a1873dc96","Type":"ContainerStarted","Data":"f2a35cb4d6565b1e281935575d28d2a98ccc16d6497ca623f441fd5b16c674ff"}
Nov 24 13:58:17 crc kubenswrapper[5039]: I1124 13:58:17.112250 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h" event={"ID":"be684f03-c6b3-4538-a113-1c4a1873dc96","Type":"ContainerStarted","Data":"69b7bd167c07f9152a849ca1f79e50d791343172ab45112561b89008bf4618f8"}
Nov 24 13:58:17 crc kubenswrapper[5039]: I1124 13:58:17.133629 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h" podStartSLOduration=1.656416631 podStartE2EDuration="2.133603087s" podCreationTimestamp="2025-11-24 13:58:15 +0000 UTC" firstStartedPulling="2025-11-24 13:58:16.087098553 +0000 UTC m=+2408.526223053" lastFinishedPulling="2025-11-24 13:58:16.564284989 +0000 UTC m=+2409.003409509" observedRunningTime="2025-11-24 13:58:17.132872528 +0000 UTC m=+2409.571997048" watchObservedRunningTime="2025-11-24 13:58:17.133603087 +0000 UTC m=+2409.572727597"
Nov 24 13:58:20 crc kubenswrapper[5039]: I1124 13:58:20.307183 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"
Nov 24 13:58:20 crc kubenswrapper[5039]: E1124 13:58:20.307779 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 13:58:32 crc kubenswrapper[5039]: I1124 13:58:32.307627 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"
Nov 24 13:58:32 crc kubenswrapper[5039]: E1124 13:58:32.308429 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 13:58:43 crc kubenswrapper[5039]: I1124 13:58:43.307195 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"
Nov 24 13:58:43 crc kubenswrapper[5039]: E1124 13:58:43.308073 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 13:58:54 crc kubenswrapper[5039]: I1124 13:58:54.307497 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"
Nov 24 13:58:54 crc kubenswrapper[5039]: E1124 13:58:54.308300 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 13:59:09 crc kubenswrapper[5039]: I1124 13:59:09.306394 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"
Nov 24 13:59:09 crc kubenswrapper[5039]: E1124 13:59:09.307047 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 13:59:14 crc kubenswrapper[5039]: I1124 13:59:14.965221 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bnr64"]
Nov 24 13:59:14 crc kubenswrapper[5039]: I1124 13:59:14.967947 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bnr64"
Nov 24 13:59:14 crc kubenswrapper[5039]: I1124 13:59:14.977496 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bnr64"]
Nov 24 13:59:15 crc kubenswrapper[5039]: I1124 13:59:15.106554 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25l6h\" (UniqueName: \"kubernetes.io/projected/c18cd2cc-a59b-4f16-bb59-f4536b737ef5-kube-api-access-25l6h\") pod \"redhat-operators-bnr64\" (UID: \"c18cd2cc-a59b-4f16-bb59-f4536b737ef5\") " pod="openshift-marketplace/redhat-operators-bnr64"
Nov 24 13:59:15 crc kubenswrapper[5039]: I1124 13:59:15.106707 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c18cd2cc-a59b-4f16-bb59-f4536b737ef5-catalog-content\") pod \"redhat-operators-bnr64\" (UID: \"c18cd2cc-a59b-4f16-bb59-f4536b737ef5\") " pod="openshift-marketplace/redhat-operators-bnr64"
Nov 24 13:59:15 crc kubenswrapper[5039]: I1124 13:59:15.107032 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c18cd2cc-a59b-4f16-bb59-f4536b737ef5-utilities\") pod \"redhat-operators-bnr64\" (UID: \"c18cd2cc-a59b-4f16-bb59-f4536b737ef5\") " pod="openshift-marketplace/redhat-operators-bnr64"
Nov 24 13:59:15 crc kubenswrapper[5039]: I1124 13:59:15.210184 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25l6h\" (UniqueName: \"kubernetes.io/projected/c18cd2cc-a59b-4f16-bb59-f4536b737ef5-kube-api-access-25l6h\") pod \"redhat-operators-bnr64\" (UID: \"c18cd2cc-a59b-4f16-bb59-f4536b737ef5\") " pod="openshift-marketplace/redhat-operators-bnr64"
Nov 24 13:59:15 crc kubenswrapper[5039]: I1124 13:59:15.210275 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c18cd2cc-a59b-4f16-bb59-f4536b737ef5-catalog-content\") pod \"redhat-operators-bnr64\" (UID: \"c18cd2cc-a59b-4f16-bb59-f4536b737ef5\") " pod="openshift-marketplace/redhat-operators-bnr64"
Nov 24 13:59:15 crc kubenswrapper[5039]: I1124 13:59:15.210341 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c18cd2cc-a59b-4f16-bb59-f4536b737ef5-utilities\") pod \"redhat-operators-bnr64\" (UID: \"c18cd2cc-a59b-4f16-bb59-f4536b737ef5\") " pod="openshift-marketplace/redhat-operators-bnr64"
Nov 24 13:59:15 crc kubenswrapper[5039]: I1124 13:59:15.211043 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c18cd2cc-a59b-4f16-bb59-f4536b737ef5-catalog-content\") pod \"redhat-operators-bnr64\" (UID: \"c18cd2cc-a59b-4f16-bb59-f4536b737ef5\") " pod="openshift-marketplace/redhat-operators-bnr64"
Nov 24 13:59:15 crc kubenswrapper[5039]: I1124 13:59:15.211055 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c18cd2cc-a59b-4f16-bb59-f4536b737ef5-utilities\") pod \"redhat-operators-bnr64\" (UID: \"c18cd2cc-a59b-4f16-bb59-f4536b737ef5\") " pod="openshift-marketplace/redhat-operators-bnr64"
Nov 24 13:59:15 crc kubenswrapper[5039]: I1124 13:59:15.234144 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25l6h\" (UniqueName: \"kubernetes.io/projected/c18cd2cc-a59b-4f16-bb59-f4536b737ef5-kube-api-access-25l6h\") pod \"redhat-operators-bnr64\" (UID: \"c18cd2cc-a59b-4f16-bb59-f4536b737ef5\") " pod="openshift-marketplace/redhat-operators-bnr64"
Nov 24 13:59:15 crc kubenswrapper[5039]: I1124 13:59:15.289827 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bnr64"
Nov 24 13:59:15 crc kubenswrapper[5039]: I1124 13:59:15.783049 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bnr64"]
Nov 24 13:59:15 crc kubenswrapper[5039]: I1124 13:59:15.927229 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bnr64" event={"ID":"c18cd2cc-a59b-4f16-bb59-f4536b737ef5","Type":"ContainerStarted","Data":"b75bc4586505399ef330737af53c6bcbcd31a4b6b822ca543b627899bd23990b"}
Nov 24 13:59:16 crc kubenswrapper[5039]: I1124 13:59:16.937032 5039 generic.go:334] "Generic (PLEG): container finished" podID="c18cd2cc-a59b-4f16-bb59-f4536b737ef5" containerID="ebb2a38daef17c68ffad0ee219ac36094a67287d3e3f535accb451aef6aef142" exitCode=0
Nov 24 13:59:16 crc kubenswrapper[5039]: I1124 13:59:16.937091 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bnr64" event={"ID":"c18cd2cc-a59b-4f16-bb59-f4536b737ef5","Type":"ContainerDied","Data":"ebb2a38daef17c68ffad0ee219ac36094a67287d3e3f535accb451aef6aef142"}
Nov 24 13:59:16 crc kubenswrapper[5039]: I1124 13:59:16.939966 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 24 13:59:22 crc kubenswrapper[5039]: I1124 13:59:22.306960 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"
Nov 24 13:59:22 crc kubenswrapper[5039]: E1124 13:59:22.307693 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 13:59:28 crc kubenswrapper[5039]: I1124 13:59:28.043163 5039 generic.go:334] "Generic (PLEG): container finished" podID="c18cd2cc-a59b-4f16-bb59-f4536b737ef5" containerID="bb370a2a572d2a169281009cb8e233e12c7a1c7bffc62eca0484635a10b5dd50" exitCode=0
Nov 24 13:59:28 crc kubenswrapper[5039]: I1124 13:59:28.043223 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bnr64" event={"ID":"c18cd2cc-a59b-4f16-bb59-f4536b737ef5","Type":"ContainerDied","Data":"bb370a2a572d2a169281009cb8e233e12c7a1c7bffc62eca0484635a10b5dd50"}
Nov 24 13:59:29 crc kubenswrapper[5039]: I1124 13:59:29.056641 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bnr64" event={"ID":"c18cd2cc-a59b-4f16-bb59-f4536b737ef5","Type":"ContainerStarted","Data":"eae40f5f66686204735982d721f21361d3c56a41a6199b74e40a2887e2286189"}
Nov 24 13:59:29 crc kubenswrapper[5039]: I1124 13:59:29.090969 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bnr64" podStartSLOduration=3.346472604 podStartE2EDuration="15.090944924s" podCreationTimestamp="2025-11-24 13:59:14 +0000 UTC" firstStartedPulling="2025-11-24 13:59:16.939577925 +0000 UTC m=+2469.378702465" lastFinishedPulling="2025-11-24 13:59:28.684050285 +0000 UTC m=+2481.123174785" observedRunningTime="2025-11-24 13:59:29.082860164 +0000 UTC m=+2481.521984664" watchObservedRunningTime="2025-11-24 13:59:29.090944924 +0000 UTC m=+2481.530069444"
Nov 24 13:59:34 crc kubenswrapper[5039]: I1124 13:59:34.307059 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"
Nov 24 13:59:34 crc kubenswrapper[5039]: E1124 13:59:34.308221 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 13:59:35 crc kubenswrapper[5039]: I1124 13:59:35.290379 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bnr64"
Nov 24 13:59:35 crc kubenswrapper[5039]: I1124 13:59:35.290445 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bnr64"
Nov 24 13:59:35 crc kubenswrapper[5039]: I1124 13:59:35.338829 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bnr64"
Nov 24 13:59:36 crc kubenswrapper[5039]: I1124 13:59:36.186172 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bnr64"
Nov 24 13:59:36 crc kubenswrapper[5039]: I1124 13:59:36.248823 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bnr64"]
Nov 24 13:59:36 crc kubenswrapper[5039]: I1124 13:59:36.320753 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kf4pl"]
Nov 24 13:59:36 crc kubenswrapper[5039]: I1124 13:59:36.321007 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-kf4pl" podUID="4d398306-3543-4f30-90a9-1f39fd5c58e4" containerName="registry-server" containerID="cri-o://0d0b895a076de61f0fb370c78da0712ebe2a20b5b0e7b3ef0056d3d9d07e2b74" gracePeriod=2
Nov 24 13:59:36 crc kubenswrapper[5039]: I1124 13:59:36.889105 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kf4pl"
Nov 24 13:59:36 crc kubenswrapper[5039]: I1124 13:59:36.968352 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d398306-3543-4f30-90a9-1f39fd5c58e4-catalog-content\") pod \"4d398306-3543-4f30-90a9-1f39fd5c58e4\" (UID: \"4d398306-3543-4f30-90a9-1f39fd5c58e4\") "
Nov 24 13:59:36 crc kubenswrapper[5039]: I1124 13:59:36.968433 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d398306-3543-4f30-90a9-1f39fd5c58e4-utilities\") pod \"4d398306-3543-4f30-90a9-1f39fd5c58e4\" (UID: \"4d398306-3543-4f30-90a9-1f39fd5c58e4\") "
Nov 24 13:59:36 crc kubenswrapper[5039]: I1124 13:59:36.968585 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtmqt\" (UniqueName: \"kubernetes.io/projected/4d398306-3543-4f30-90a9-1f39fd5c58e4-kube-api-access-gtmqt\") pod \"4d398306-3543-4f30-90a9-1f39fd5c58e4\" (UID: \"4d398306-3543-4f30-90a9-1f39fd5c58e4\") "
Nov 24 13:59:36 crc kubenswrapper[5039]: I1124 13:59:36.971993 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d398306-3543-4f30-90a9-1f39fd5c58e4-utilities" (OuterVolumeSpecName: "utilities") pod "4d398306-3543-4f30-90a9-1f39fd5c58e4" (UID: "4d398306-3543-4f30-90a9-1f39fd5c58e4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:59:37 crc kubenswrapper[5039]: I1124 13:59:37.004375 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d398306-3543-4f30-90a9-1f39fd5c58e4-kube-api-access-gtmqt" (OuterVolumeSpecName: "kube-api-access-gtmqt") pod "4d398306-3543-4f30-90a9-1f39fd5c58e4" (UID: "4d398306-3543-4f30-90a9-1f39fd5c58e4"). InnerVolumeSpecName "kube-api-access-gtmqt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 13:59:37 crc kubenswrapper[5039]: I1124 13:59:37.073639 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d398306-3543-4f30-90a9-1f39fd5c58e4-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 13:59:37 crc kubenswrapper[5039]: I1124 13:59:37.073682 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtmqt\" (UniqueName: \"kubernetes.io/projected/4d398306-3543-4f30-90a9-1f39fd5c58e4-kube-api-access-gtmqt\") on node \"crc\" DevicePath \"\""
Nov 24 13:59:37 crc kubenswrapper[5039]: I1124 13:59:37.083672 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d398306-3543-4f30-90a9-1f39fd5c58e4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4d398306-3543-4f30-90a9-1f39fd5c58e4" (UID: "4d398306-3543-4f30-90a9-1f39fd5c58e4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 13:59:37 crc kubenswrapper[5039]: I1124 13:59:37.151733 5039 generic.go:334] "Generic (PLEG): container finished" podID="4d398306-3543-4f30-90a9-1f39fd5c58e4" containerID="0d0b895a076de61f0fb370c78da0712ebe2a20b5b0e7b3ef0056d3d9d07e2b74" exitCode=0
Nov 24 13:59:37 crc kubenswrapper[5039]: I1124 13:59:37.151798 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kf4pl"
Nov 24 13:59:37 crc kubenswrapper[5039]: I1124 13:59:37.151848 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kf4pl" event={"ID":"4d398306-3543-4f30-90a9-1f39fd5c58e4","Type":"ContainerDied","Data":"0d0b895a076de61f0fb370c78da0712ebe2a20b5b0e7b3ef0056d3d9d07e2b74"}
Nov 24 13:59:37 crc kubenswrapper[5039]: I1124 13:59:37.151907 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kf4pl" event={"ID":"4d398306-3543-4f30-90a9-1f39fd5c58e4","Type":"ContainerDied","Data":"258d825aab874221699eccf2a17116ed99e5692a23d6809f9662c346082fa174"}
Nov 24 13:59:37 crc kubenswrapper[5039]: I1124 13:59:37.151930 5039 scope.go:117] "RemoveContainer" containerID="0d0b895a076de61f0fb370c78da0712ebe2a20b5b0e7b3ef0056d3d9d07e2b74"
Nov 24 13:59:37 crc kubenswrapper[5039]: I1124 13:59:37.175294 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d398306-3543-4f30-90a9-1f39fd5c58e4-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 13:59:37 crc kubenswrapper[5039]: I1124 13:59:37.191347 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kf4pl"]
Nov 24 13:59:37 crc kubenswrapper[5039]: I1124 13:59:37.191653 5039 scope.go:117] "RemoveContainer" containerID="5cce04a09b2f9520dc740cfbd6abe92716ee6394ff8322389aeb73a316dc9fbc"
Nov 24 13:59:37 crc kubenswrapper[5039]: I1124 13:59:37.201725 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-kf4pl"]
Nov 24 13:59:37 crc kubenswrapper[5039]: I1124 13:59:37.239007 5039 scope.go:117] "RemoveContainer" containerID="6d70e3628a71e0e873173b796fe1badfae50972547e093e3f2dc136a49dfa41a"
Nov 24 13:59:37 crc kubenswrapper[5039]: I1124 13:59:37.278588 5039 scope.go:117] "RemoveContainer" containerID="0d0b895a076de61f0fb370c78da0712ebe2a20b5b0e7b3ef0056d3d9d07e2b74"
Nov 24 13:59:37 crc kubenswrapper[5039]: E1124 13:59:37.279339 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d0b895a076de61f0fb370c78da0712ebe2a20b5b0e7b3ef0056d3d9d07e2b74\": container with ID starting with 0d0b895a076de61f0fb370c78da0712ebe2a20b5b0e7b3ef0056d3d9d07e2b74 not found: ID does not exist" containerID="0d0b895a076de61f0fb370c78da0712ebe2a20b5b0e7b3ef0056d3d9d07e2b74"
Nov 24 13:59:37 crc kubenswrapper[5039]: I1124 13:59:37.279373 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d0b895a076de61f0fb370c78da0712ebe2a20b5b0e7b3ef0056d3d9d07e2b74"} err="failed to get container status \"0d0b895a076de61f0fb370c78da0712ebe2a20b5b0e7b3ef0056d3d9d07e2b74\": rpc error: code = NotFound desc = could not find container \"0d0b895a076de61f0fb370c78da0712ebe2a20b5b0e7b3ef0056d3d9d07e2b74\": container with ID starting with 0d0b895a076de61f0fb370c78da0712ebe2a20b5b0e7b3ef0056d3d9d07e2b74 not found: ID does not exist"
Nov 24 13:59:37 crc kubenswrapper[5039]: I1124 13:59:37.279400 5039 scope.go:117] "RemoveContainer" containerID="5cce04a09b2f9520dc740cfbd6abe92716ee6394ff8322389aeb73a316dc9fbc"
Nov 24 13:59:37 crc kubenswrapper[5039]: E1124 13:59:37.279731 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cce04a09b2f9520dc740cfbd6abe92716ee6394ff8322389aeb73a316dc9fbc\": container with ID starting with 5cce04a09b2f9520dc740cfbd6abe92716ee6394ff8322389aeb73a316dc9fbc not found: ID does not exist" containerID="5cce04a09b2f9520dc740cfbd6abe92716ee6394ff8322389aeb73a316dc9fbc"
Nov 24 13:59:37 crc kubenswrapper[5039]: I1124 13:59:37.279762 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cce04a09b2f9520dc740cfbd6abe92716ee6394ff8322389aeb73a316dc9fbc"} err="failed to get container status \"5cce04a09b2f9520dc740cfbd6abe92716ee6394ff8322389aeb73a316dc9fbc\": rpc error: code = NotFound desc = could not find container \"5cce04a09b2f9520dc740cfbd6abe92716ee6394ff8322389aeb73a316dc9fbc\": container with ID starting with 5cce04a09b2f9520dc740cfbd6abe92716ee6394ff8322389aeb73a316dc9fbc not found: ID does not exist"
Nov 24 13:59:37 crc kubenswrapper[5039]: I1124 13:59:37.279778 5039 scope.go:117] "RemoveContainer" containerID="6d70e3628a71e0e873173b796fe1badfae50972547e093e3f2dc136a49dfa41a"
Nov 24 13:59:37 crc kubenswrapper[5039]: E1124 13:59:37.279968 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d70e3628a71e0e873173b796fe1badfae50972547e093e3f2dc136a49dfa41a\": container with ID starting with 6d70e3628a71e0e873173b796fe1badfae50972547e093e3f2dc136a49dfa41a not found: ID does not exist" containerID="6d70e3628a71e0e873173b796fe1badfae50972547e093e3f2dc136a49dfa41a"
Nov 24 13:59:37 crc kubenswrapper[5039]: I1124 13:59:37.279992 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d70e3628a71e0e873173b796fe1badfae50972547e093e3f2dc136a49dfa41a"} err="failed to get container status \"6d70e3628a71e0e873173b796fe1badfae50972547e093e3f2dc136a49dfa41a\": rpc error: code = NotFound desc = could not find container \"6d70e3628a71e0e873173b796fe1badfae50972547e093e3f2dc136a49dfa41a\": container with ID starting with 6d70e3628a71e0e873173b796fe1badfae50972547e093e3f2dc136a49dfa41a not found: ID does not exist"
Nov 24 13:59:38 crc kubenswrapper[5039]: I1124 13:59:38.318809 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d398306-3543-4f30-90a9-1f39fd5c58e4" path="/var/lib/kubelet/pods/4d398306-3543-4f30-90a9-1f39fd5c58e4/volumes"
Nov 24 13:59:46 crc kubenswrapper[5039]: I1124 13:59:46.307303 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"
Nov 24 13:59:46 crc kubenswrapper[5039]: E1124 13:59:46.308661 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 13:59:57 crc kubenswrapper[5039]: I1124 13:59:57.307339 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451"
Nov 24 13:59:58 crc kubenswrapper[5039]: I1124 13:59:58.400485 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"f4df9e2b4ae7b3d6fb4f3a538dbbb6b8373edbff29ee3cbb674417bf32f805d0"}
Nov 24 14:00:00 crc kubenswrapper[5039]: I1124 14:00:00.159901 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm"]
Nov 24 14:00:00 crc kubenswrapper[5039]: E1124 14:00:00.160851 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d398306-3543-4f30-90a9-1f39fd5c58e4" containerName="extract-content"
Nov 24 14:00:00 crc kubenswrapper[5039]: I1124 14:00:00.160864 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d398306-3543-4f30-90a9-1f39fd5c58e4" containerName="extract-content"
Nov 24 14:00:00 crc kubenswrapper[5039]: E1124 14:00:00.160916 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d398306-3543-4f30-90a9-1f39fd5c58e4" containerName="extract-utilities"
Nov 24 14:00:00 crc kubenswrapper[5039]: I1124 14:00:00.160923 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d398306-3543-4f30-90a9-1f39fd5c58e4" containerName="extract-utilities"
Nov 24 14:00:00 crc kubenswrapper[5039]: E1124 14:00:00.160934 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d398306-3543-4f30-90a9-1f39fd5c58e4" containerName="registry-server"
Nov 24 14:00:00 crc kubenswrapper[5039]: I1124 14:00:00.160941 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d398306-3543-4f30-90a9-1f39fd5c58e4" containerName="registry-server"
Nov 24 14:00:00 crc kubenswrapper[5039]: I1124 14:00:00.161182 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d398306-3543-4f30-90a9-1f39fd5c58e4" containerName="registry-server"
Nov 24 14:00:00 crc kubenswrapper[5039]: I1124 14:00:00.162004 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm"
Nov 24 14:00:00 crc kubenswrapper[5039]: I1124 14:00:00.165157 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 24 14:00:00 crc kubenswrapper[5039]: I1124 14:00:00.166974 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 24 14:00:00 crc kubenswrapper[5039]: I1124 14:00:00.182813 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm"]
Nov 24 14:00:00 crc kubenswrapper[5039]: I1124 14:00:00.284128 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b6828d14-20ef-48a1-91cb-c2c3e43744f0-config-volume\") pod \"collect-profiles-29399880-2nkfm\" (UID: \"b6828d14-20ef-48a1-91cb-c2c3e43744f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm"
Nov 24 14:00:00 crc kubenswrapper[5039]: I1124 14:00:00.284282 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b6828d14-20ef-48a1-91cb-c2c3e43744f0-secret-volume\") pod \"collect-profiles-29399880-2nkfm\" (UID: \"b6828d14-20ef-48a1-91cb-c2c3e43744f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm"
Nov 24 14:00:00 crc kubenswrapper[5039]: I1124 14:00:00.284357 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2gwxd\" (UniqueName: \"kubernetes.io/projected/b6828d14-20ef-48a1-91cb-c2c3e43744f0-kube-api-access-2gwxd\") pod \"collect-profiles-29399880-2nkfm\" (UID: \"b6828d14-20ef-48a1-91cb-c2c3e43744f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm"
Nov 24 14:00:00 crc kubenswrapper[5039]: I1124 14:00:00.386594 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b6828d14-20ef-48a1-91cb-c2c3e43744f0-secret-volume\") pod \"collect-profiles-29399880-2nkfm\" (UID: \"b6828d14-20ef-48a1-91cb-c2c3e43744f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm"
Nov 24 14:00:00 crc kubenswrapper[5039]: I1124 14:00:00.386685 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2gwxd\" (UniqueName: \"kubernetes.io/projected/b6828d14-20ef-48a1-91cb-c2c3e43744f0-kube-api-access-2gwxd\") pod \"collect-profiles-29399880-2nkfm\" (UID: \"b6828d14-20ef-48a1-91cb-c2c3e43744f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm"
Nov 24 14:00:00 crc kubenswrapper[5039]: I1124 14:00:00.386769 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b6828d14-20ef-48a1-91cb-c2c3e43744f0-config-volume\") pod \"collect-profiles-29399880-2nkfm\" (UID: \"b6828d14-20ef-48a1-91cb-c2c3e43744f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm"
Nov 24 14:00:00 crc kubenswrapper[5039]: I1124 14:00:00.387689 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b6828d14-20ef-48a1-91cb-c2c3e43744f0-config-volume\") pod \"collect-profiles-29399880-2nkfm\" (UID: \"b6828d14-20ef-48a1-91cb-c2c3e43744f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm"
Nov 24 14:00:00 crc kubenswrapper[5039]: I1124 14:00:00.401598 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b6828d14-20ef-48a1-91cb-c2c3e43744f0-secret-volume\") pod \"collect-profiles-29399880-2nkfm\" (UID: \"b6828d14-20ef-48a1-91cb-c2c3e43744f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm"
Nov 24 14:00:00 crc kubenswrapper[5039]: I1124 14:00:00.408170 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2gwxd\" (UniqueName: \"kubernetes.io/projected/b6828d14-20ef-48a1-91cb-c2c3e43744f0-kube-api-access-2gwxd\") pod \"collect-profiles-29399880-2nkfm\" (UID: \"b6828d14-20ef-48a1-91cb-c2c3e43744f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm"
Nov 24 14:00:00 crc kubenswrapper[5039]: I1124 14:00:00.487555 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm"
Nov 24 14:00:00 crc kubenswrapper[5039]: I1124 14:00:00.934963 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm"]
Nov 24 14:00:00 crc kubenswrapper[5039]: W1124 14:00:00.939391 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb6828d14_20ef_48a1_91cb_c2c3e43744f0.slice/crio-0fe00f138720380b9cb60e68dfae58c338696dfcb10b5011d81fca25dc1fbfbe WatchSource:0}: Error finding container 0fe00f138720380b9cb60e68dfae58c338696dfcb10b5011d81fca25dc1fbfbe: Status 404 returned error can't find the container with id 0fe00f138720380b9cb60e68dfae58c338696dfcb10b5011d81fca25dc1fbfbe
Nov 24 14:00:01 crc kubenswrapper[5039]: I1124 14:00:01.453189 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm" event={"ID":"b6828d14-20ef-48a1-91cb-c2c3e43744f0","Type":"ContainerStarted","Data":"f8de2432faf3a95e0b2d51a2c0f17621df6e8902438b2bc833eed563f41bda23"}
Nov 24 14:00:01 crc kubenswrapper[5039]: I1124 14:00:01.453729 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm" event={"ID":"b6828d14-20ef-48a1-91cb-c2c3e43744f0","Type":"ContainerStarted","Data":"0fe00f138720380b9cb60e68dfae58c338696dfcb10b5011d81fca25dc1fbfbe"}
Nov 24 14:00:01 crc kubenswrapper[5039]: I1124 14:00:01.470838 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm" podStartSLOduration=1.470820888 podStartE2EDuration="1.470820888s" podCreationTimestamp="2025-11-24 14:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 14:00:01.467453576 +0000 UTC m=+2513.906578086" watchObservedRunningTime="2025-11-24 14:00:01.470820888 +0000 UTC m=+2513.909945388"
Nov 24 14:00:02 crc kubenswrapper[5039]: I1124 14:00:02.462840 5039 generic.go:334] "Generic (PLEG): container finished" podID="b6828d14-20ef-48a1-91cb-c2c3e43744f0" containerID="f8de2432faf3a95e0b2d51a2c0f17621df6e8902438b2bc833eed563f41bda23" exitCode=0
Nov 24 14:00:02 crc kubenswrapper[5039]: I1124 14:00:02.462872 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm" event={"ID":"b6828d14-20ef-48a1-91cb-c2c3e43744f0","Type":"ContainerDied","Data":"f8de2432faf3a95e0b2d51a2c0f17621df6e8902438b2bc833eed563f41bda23"}
Nov 24 14:00:03 crc kubenswrapper[5039]: I1124 14:00:03.885846 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm"
Nov 24 14:00:04 crc kubenswrapper[5039]: I1124 14:00:04.070678 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b6828d14-20ef-48a1-91cb-c2c3e43744f0-secret-volume\") pod \"b6828d14-20ef-48a1-91cb-c2c3e43744f0\" (UID: \"b6828d14-20ef-48a1-91cb-c2c3e43744f0\") "
Nov 24 14:00:04 crc kubenswrapper[5039]: I1124 14:00:04.071130 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b6828d14-20ef-48a1-91cb-c2c3e43744f0-config-volume\") pod \"b6828d14-20ef-48a1-91cb-c2c3e43744f0\" (UID: \"b6828d14-20ef-48a1-91cb-c2c3e43744f0\") "
Nov 24 14:00:04 crc kubenswrapper[5039]: I1124 14:00:04.071286 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2gwxd\" (UniqueName: \"kubernetes.io/projected/b6828d14-20ef-48a1-91cb-c2c3e43744f0-kube-api-access-2gwxd\") pod \"b6828d14-20ef-48a1-91cb-c2c3e43744f0\" (UID: \"b6828d14-20ef-48a1-91cb-c2c3e43744f0\") "
Nov 24 14:00:04 crc kubenswrapper[5039]: I1124 14:00:04.072470 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6828d14-20ef-48a1-91cb-c2c3e43744f0-config-volume" (OuterVolumeSpecName: "config-volume") pod "b6828d14-20ef-48a1-91cb-c2c3e43744f0" (UID: "b6828d14-20ef-48a1-91cb-c2c3e43744f0"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 14:00:04 crc kubenswrapper[5039]: I1124 14:00:04.082445 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6828d14-20ef-48a1-91cb-c2c3e43744f0-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b6828d14-20ef-48a1-91cb-c2c3e43744f0" (UID: "b6828d14-20ef-48a1-91cb-c2c3e43744f0"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:00:04 crc kubenswrapper[5039]: I1124 14:00:04.082552 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6828d14-20ef-48a1-91cb-c2c3e43744f0-kube-api-access-2gwxd" (OuterVolumeSpecName: "kube-api-access-2gwxd") pod "b6828d14-20ef-48a1-91cb-c2c3e43744f0" (UID: "b6828d14-20ef-48a1-91cb-c2c3e43744f0"). InnerVolumeSpecName "kube-api-access-2gwxd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 14:00:04 crc kubenswrapper[5039]: I1124 14:00:04.174239 5039 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b6828d14-20ef-48a1-91cb-c2c3e43744f0-config-volume\") on node \"crc\" DevicePath \"\""
Nov 24 14:00:04 crc kubenswrapper[5039]: I1124 14:00:04.174281 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2gwxd\" (UniqueName: \"kubernetes.io/projected/b6828d14-20ef-48a1-91cb-c2c3e43744f0-kube-api-access-2gwxd\") on node \"crc\" DevicePath \"\""
Nov 24 14:00:04 crc kubenswrapper[5039]: I1124 14:00:04.174296 5039 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b6828d14-20ef-48a1-91cb-c2c3e43744f0-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 24 14:00:04 crc kubenswrapper[5039]: I1124 14:00:04.496462 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm" event={"ID":"b6828d14-20ef-48a1-91cb-c2c3e43744f0","Type":"ContainerDied","Data":"0fe00f138720380b9cb60e68dfae58c338696dfcb10b5011d81fca25dc1fbfbe"}
Nov 24 14:00:04 crc kubenswrapper[5039]: I1124 14:00:04.496518 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0fe00f138720380b9cb60e68dfae58c338696dfcb10b5011d81fca25dc1fbfbe"
Nov 24 14:00:04 crc kubenswrapper[5039]: I1124 14:00:04.496531 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm"
Nov 24 14:00:04 crc kubenswrapper[5039]: I1124 14:00:04.555376 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54"]
Nov 24 14:00:04 crc kubenswrapper[5039]: I1124 14:00:04.564989 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399835-8xp54"]
Nov 24 14:00:06 crc kubenswrapper[5039]: I1124 14:00:06.329137 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6170f687-30e2-44b0-860e-ddcee4e4f2d4" path="/var/lib/kubelet/pods/6170f687-30e2-44b0-860e-ddcee4e4f2d4/volumes"
Nov 24 14:00:39 crc kubenswrapper[5039]: I1124 14:00:39.876645 5039 generic.go:334] "Generic (PLEG): container finished" podID="be684f03-c6b3-4538-a113-1c4a1873dc96" containerID="69b7bd167c07f9152a849ca1f79e50d791343172ab45112561b89008bf4618f8" exitCode=0
Nov 24 14:00:39 crc kubenswrapper[5039]: I1124 14:00:39.876711 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h" event={"ID":"be684f03-c6b3-4538-a113-1c4a1873dc96","Type":"ContainerDied","Data":"69b7bd167c07f9152a849ca1f79e50d791343172ab45112561b89008bf4618f8"}
Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.438474 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"
Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.484984 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-telemetry-combined-ca-bundle\") pod \"be684f03-c6b3-4538-a113-1c4a1873dc96\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") "
Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.485083 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-inventory\") pod \"be684f03-c6b3-4538-a113-1c4a1873dc96\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") "
Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.485177 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ceilometer-compute-config-data-1\") pod \"be684f03-c6b3-4538-a113-1c4a1873dc96\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") "
Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.485277 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ceilometer-compute-config-data-0\") pod \"be684f03-c6b3-4538-a113-1c4a1873dc96\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") "
Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.485376 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ceilometer-compute-config-data-2\") pod \"be684f03-c6b3-4538-a113-1c4a1873dc96\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") "
Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.485402 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cgqcv\" (UniqueName: \"kubernetes.io/projected/be684f03-c6b3-4538-a113-1c4a1873dc96-kube-api-access-cgqcv\") pod \"be684f03-c6b3-4538-a113-1c4a1873dc96\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") "
Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.485474 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ssh-key\") pod \"be684f03-c6b3-4538-a113-1c4a1873dc96\" (UID: \"be684f03-c6b3-4538-a113-1c4a1873dc96\") "
Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.492766 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be684f03-c6b3-4538-a113-1c4a1873dc96-kube-api-access-cgqcv" (OuterVolumeSpecName: "kube-api-access-cgqcv") pod "be684f03-c6b3-4538-a113-1c4a1873dc96" (UID: "be684f03-c6b3-4538-a113-1c4a1873dc96"). InnerVolumeSpecName "kube-api-access-cgqcv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.519757 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "be684f03-c6b3-4538-a113-1c4a1873dc96" (UID: "be684f03-c6b3-4538-a113-1c4a1873dc96"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.521434 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "be684f03-c6b3-4538-a113-1c4a1873dc96" (UID: "be684f03-c6b3-4538-a113-1c4a1873dc96"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.523245 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-inventory" (OuterVolumeSpecName: "inventory") pod "be684f03-c6b3-4538-a113-1c4a1873dc96" (UID: "be684f03-c6b3-4538-a113-1c4a1873dc96"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.524976 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "be684f03-c6b3-4538-a113-1c4a1873dc96" (UID: "be684f03-c6b3-4538-a113-1c4a1873dc96"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.527038 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "be684f03-c6b3-4538-a113-1c4a1873dc96" (UID: "be684f03-c6b3-4538-a113-1c4a1873dc96"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.528787 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "be684f03-c6b3-4538-a113-1c4a1873dc96" (UID: "be684f03-c6b3-4538-a113-1c4a1873dc96"). InnerVolumeSpecName "ceilometer-compute-config-data-0".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.586854 5039 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.586894 5039 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.586908 5039 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.586923 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cgqcv\" (UniqueName: \"kubernetes.io/projected/be684f03-c6b3-4538-a113-1c4a1873dc96-kube-api-access-cgqcv\") on node \"crc\" DevicePath \"\"" Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.586935 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.586947 5039 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.586964 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/be684f03-c6b3-4538-a113-1c4a1873dc96-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.904546 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h" event={"ID":"be684f03-c6b3-4538-a113-1c4a1873dc96","Type":"ContainerDied","Data":"f2a35cb4d6565b1e281935575d28d2a98ccc16d6497ca623f441fd5b16c674ff"} Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.904755 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f2a35cb4d6565b1e281935575d28d2a98ccc16d6497ca623f441fd5b16c674ff" Nov 24 14:00:41 crc kubenswrapper[5039]: I1124 14:00:41.904642 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.020859 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx"] Nov 24 14:00:42 crc kubenswrapper[5039]: E1124 14:00:42.021334 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be684f03-c6b3-4538-a113-1c4a1873dc96" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.021355 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="be684f03-c6b3-4538-a113-1c4a1873dc96" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 24 14:00:42 crc kubenswrapper[5039]: E1124 14:00:42.021414 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6828d14-20ef-48a1-91cb-c2c3e43744f0" containerName="collect-profiles" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.021422 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6828d14-20ef-48a1-91cb-c2c3e43744f0" containerName="collect-profiles" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.021681 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6828d14-20ef-48a1-91cb-c2c3e43744f0" containerName="collect-profiles" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.021703 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="be684f03-c6b3-4538-a113-1c4a1873dc96" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.022623 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.028843 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-ipmi-config-data" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.029043 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.029289 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.029494 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.030210 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.032948 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx"] Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.096985 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.097114 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.097175 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xc4bx\" (UniqueName: \"kubernetes.io/projected/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-kube-api-access-xc4bx\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.097204 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.097252 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ssh-key\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.097312 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.097388 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: E1124 14:00:42.189718 5039 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe684f03_c6b3_4538_a113_1c4a1873dc96.slice/crio-f2a35cb4d6565b1e281935575d28d2a98ccc16d6497ca623f441fd5b16c674ff\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe684f03_c6b3_4538_a113_1c4a1873dc96.slice\": RecentStats: unable to find data in memory cache]" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.198267 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.198346 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.198387 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xc4bx\" (UniqueName: \"kubernetes.io/projected/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-kube-api-access-xc4bx\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.198409 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.198441 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ssh-key\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.198460 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.199184 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.204342 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: 
\"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.206157 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.206627 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.207152 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ssh-key\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.217030 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.217723 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.229911 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xc4bx\" (UniqueName: \"kubernetes.io/projected/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-kube-api-access-xc4bx\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.345921 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:00:42 crc kubenswrapper[5039]: I1124 14:00:42.936486 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx"] Nov 24 14:00:43 crc kubenswrapper[5039]: I1124 14:00:43.926732 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" event={"ID":"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d","Type":"ContainerStarted","Data":"14aa708a57729306105a393518cccb74152f935950252d1e3a4399ac6d5231c2"} Nov 24 14:00:43 crc kubenswrapper[5039]: I1124 14:00:43.927099 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" event={"ID":"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d","Type":"ContainerStarted","Data":"adaa3a0a0d95603c4b5e6d40ed2a4d77eb822e0847ff11eb416c4694d454b218"} Nov 24 14:00:43 crc kubenswrapper[5039]: I1124 14:00:43.955048 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" podStartSLOduration=2.452725459 podStartE2EDuration="2.955016736s" podCreationTimestamp="2025-11-24 14:00:41 +0000 UTC" firstStartedPulling="2025-11-24 14:00:42.944842554 +0000 UTC m=+2555.383967054" lastFinishedPulling="2025-11-24 14:00:43.447133821 +0000 UTC m=+2555.886258331" observedRunningTime="2025-11-24 14:00:43.949797417 +0000 UTC m=+2556.388921947" watchObservedRunningTime="2025-11-24 14:00:43.955016736 +0000 UTC m=+2556.394141256" Nov 24 14:00:50 crc kubenswrapper[5039]: I1124 14:00:50.305483 5039 scope.go:117] "RemoveContainer" containerID="8fea76e0ae8d5965ad8f64d9623d6e39bda662baa57206f38c5b9d0e603594c1" Nov 24 14:01:00 crc kubenswrapper[5039]: I1124 14:01:00.148957 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29399881-khmls"] Nov 24 14:01:00 crc kubenswrapper[5039]: I1124 14:01:00.151959 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29399881-khmls" Nov 24 14:01:00 crc kubenswrapper[5039]: I1124 14:01:00.174483 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29399881-khmls"] Nov 24 14:01:00 crc kubenswrapper[5039]: I1124 14:01:00.191230 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-fernet-keys\") pod \"keystone-cron-29399881-khmls\" (UID: \"5589d33f-8cad-4a38-ae7d-f9611bb8efc5\") " pod="openstack/keystone-cron-29399881-khmls" Nov 24 14:01:00 crc kubenswrapper[5039]: I1124 14:01:00.191673 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-combined-ca-bundle\") pod \"keystone-cron-29399881-khmls\" (UID: \"5589d33f-8cad-4a38-ae7d-f9611bb8efc5\") " pod="openstack/keystone-cron-29399881-khmls" Nov 24 14:01:00 crc kubenswrapper[5039]: I1124 14:01:00.191819 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-267gn\" (UniqueName: \"kubernetes.io/projected/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-kube-api-access-267gn\") pod \"keystone-cron-29399881-khmls\" (UID: \"5589d33f-8cad-4a38-ae7d-f9611bb8efc5\") " pod="openstack/keystone-cron-29399881-khmls" Nov 24 14:01:00 crc kubenswrapper[5039]: I1124 14:01:00.191941 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-config-data\") pod \"keystone-cron-29399881-khmls\" (UID: \"5589d33f-8cad-4a38-ae7d-f9611bb8efc5\") " pod="openstack/keystone-cron-29399881-khmls" Nov 24 14:01:00 crc kubenswrapper[5039]: I1124 14:01:00.295280 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-fernet-keys\") pod \"keystone-cron-29399881-khmls\" (UID: \"5589d33f-8cad-4a38-ae7d-f9611bb8efc5\") " pod="openstack/keystone-cron-29399881-khmls" Nov 24 14:01:00 crc kubenswrapper[5039]: I1124 14:01:00.295560 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-combined-ca-bundle\") pod \"keystone-cron-29399881-khmls\" (UID: \"5589d33f-8cad-4a38-ae7d-f9611bb8efc5\") " pod="openstack/keystone-cron-29399881-khmls" Nov 24 14:01:00 crc kubenswrapper[5039]: I1124 14:01:00.295690 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-267gn\" (UniqueName: \"kubernetes.io/projected/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-kube-api-access-267gn\") pod \"keystone-cron-29399881-khmls\" (UID: \"5589d33f-8cad-4a38-ae7d-f9611bb8efc5\") " pod="openstack/keystone-cron-29399881-khmls" Nov 24 14:01:00 crc kubenswrapper[5039]: I1124 14:01:00.295767 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-config-data\") pod \"keystone-cron-29399881-khmls\" (UID: \"5589d33f-8cad-4a38-ae7d-f9611bb8efc5\") " pod="openstack/keystone-cron-29399881-khmls" Nov 24 14:01:00 crc kubenswrapper[5039]: I1124 14:01:00.301966 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-config-data\") pod \"keystone-cron-29399881-khmls\" (UID: \"5589d33f-8cad-4a38-ae7d-f9611bb8efc5\") " pod="openstack/keystone-cron-29399881-khmls" Nov 24 14:01:00 crc kubenswrapper[5039]: I1124 14:01:00.302806 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-combined-ca-bundle\") pod \"keystone-cron-29399881-khmls\" (UID: \"5589d33f-8cad-4a38-ae7d-f9611bb8efc5\") " pod="openstack/keystone-cron-29399881-khmls" Nov 24 14:01:00 crc kubenswrapper[5039]: I1124 14:01:00.309644 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-fernet-keys\") pod \"keystone-cron-29399881-khmls\" (UID: \"5589d33f-8cad-4a38-ae7d-f9611bb8efc5\") " pod="openstack/keystone-cron-29399881-khmls" Nov 24 14:01:00 crc kubenswrapper[5039]: I1124 14:01:00.314425 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-267gn\" (UniqueName: \"kubernetes.io/projected/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-kube-api-access-267gn\") pod \"keystone-cron-29399881-khmls\" (UID: \"5589d33f-8cad-4a38-ae7d-f9611bb8efc5\") " pod="openstack/keystone-cron-29399881-khmls" Nov 24 14:01:00 crc kubenswrapper[5039]: I1124 14:01:00.473806 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29399881-khmls" Nov 24 14:01:00 crc kubenswrapper[5039]: I1124 14:01:00.955008 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29399881-khmls"] Nov 24 14:01:00 crc kubenswrapper[5039]: W1124 14:01:00.955242 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5589d33f_8cad_4a38_ae7d_f9611bb8efc5.slice/crio-76de56efe3176ede0be04d84312301162ee9424282a2ab37f03039216d4f848c WatchSource:0}: Error finding container 76de56efe3176ede0be04d84312301162ee9424282a2ab37f03039216d4f848c: Status 404 returned error can't find the container with id 76de56efe3176ede0be04d84312301162ee9424282a2ab37f03039216d4f848c Nov 24 14:01:01 crc kubenswrapper[5039]: I1124 14:01:01.103408 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29399881-khmls" event={"ID":"5589d33f-8cad-4a38-ae7d-f9611bb8efc5","Type":"ContainerStarted","Data":"76de56efe3176ede0be04d84312301162ee9424282a2ab37f03039216d4f848c"} Nov 24 14:01:02 crc kubenswrapper[5039]: I1124 14:01:02.113963 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29399881-khmls" event={"ID":"5589d33f-8cad-4a38-ae7d-f9611bb8efc5","Type":"ContainerStarted","Data":"9b49edb241171cf89c863b9e32f70938c9281a21bd719707d1c3f7f72ee4707c"} Nov 24 14:01:04 crc kubenswrapper[5039]: I1124 14:01:04.155651 5039 generic.go:334] "Generic (PLEG): container finished" podID="5589d33f-8cad-4a38-ae7d-f9611bb8efc5" containerID="9b49edb241171cf89c863b9e32f70938c9281a21bd719707d1c3f7f72ee4707c" exitCode=0 Nov 24 14:01:04 crc kubenswrapper[5039]: I1124 14:01:04.155738 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29399881-khmls" event={"ID":"5589d33f-8cad-4a38-ae7d-f9611bb8efc5","Type":"ContainerDied","Data":"9b49edb241171cf89c863b9e32f70938c9281a21bd719707d1c3f7f72ee4707c"} Nov 24 14:01:05 crc kubenswrapper[5039]: I1124 14:01:05.576676 5039 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29399881-khmls" Nov 24 14:01:05 crc kubenswrapper[5039]: I1124 14:01:05.715278 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-combined-ca-bundle\") pod \"5589d33f-8cad-4a38-ae7d-f9611bb8efc5\" (UID: \"5589d33f-8cad-4a38-ae7d-f9611bb8efc5\") " Nov 24 14:01:05 crc kubenswrapper[5039]: I1124 14:01:05.715396 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-config-data\") pod \"5589d33f-8cad-4a38-ae7d-f9611bb8efc5\" (UID: \"5589d33f-8cad-4a38-ae7d-f9611bb8efc5\") " Nov 24 14:01:05 crc kubenswrapper[5039]: I1124 14:01:05.715466 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-fernet-keys\") pod \"5589d33f-8cad-4a38-ae7d-f9611bb8efc5\" (UID: \"5589d33f-8cad-4a38-ae7d-f9611bb8efc5\") " Nov 24 14:01:05 crc kubenswrapper[5039]: I1124 14:01:05.715494 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-267gn\" (UniqueName: \"kubernetes.io/projected/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-kube-api-access-267gn\") pod \"5589d33f-8cad-4a38-ae7d-f9611bb8efc5\" (UID: \"5589d33f-8cad-4a38-ae7d-f9611bb8efc5\") " Nov 24 14:01:05 crc kubenswrapper[5039]: I1124 14:01:05.721024 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "5589d33f-8cad-4a38-ae7d-f9611bb8efc5" (UID: "5589d33f-8cad-4a38-ae7d-f9611bb8efc5"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:01:05 crc kubenswrapper[5039]: I1124 14:01:05.731935 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-kube-api-access-267gn" (OuterVolumeSpecName: "kube-api-access-267gn") pod "5589d33f-8cad-4a38-ae7d-f9611bb8efc5" (UID: "5589d33f-8cad-4a38-ae7d-f9611bb8efc5"). InnerVolumeSpecName "kube-api-access-267gn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:01:05 crc kubenswrapper[5039]: I1124 14:01:05.761903 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5589d33f-8cad-4a38-ae7d-f9611bb8efc5" (UID: "5589d33f-8cad-4a38-ae7d-f9611bb8efc5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:01:05 crc kubenswrapper[5039]: I1124 14:01:05.797697 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-config-data" (OuterVolumeSpecName: "config-data") pod "5589d33f-8cad-4a38-ae7d-f9611bb8efc5" (UID: "5589d33f-8cad-4a38-ae7d-f9611bb8efc5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:01:05 crc kubenswrapper[5039]: I1124 14:01:05.818496 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:01:05 crc kubenswrapper[5039]: I1124 14:01:05.818568 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 14:01:05 crc kubenswrapper[5039]: I1124 14:01:05.818581 5039 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 24 14:01:05 crc kubenswrapper[5039]: I1124 14:01:05.818593 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-267gn\" (UniqueName: \"kubernetes.io/projected/5589d33f-8cad-4a38-ae7d-f9611bb8efc5-kube-api-access-267gn\") on node \"crc\" DevicePath \"\"" Nov 24 14:01:06 crc kubenswrapper[5039]: I1124 14:01:06.183347 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29399881-khmls" event={"ID":"5589d33f-8cad-4a38-ae7d-f9611bb8efc5","Type":"ContainerDied","Data":"76de56efe3176ede0be04d84312301162ee9424282a2ab37f03039216d4f848c"} Nov 24 14:01:06 crc kubenswrapper[5039]: I1124 14:01:06.183391 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76de56efe3176ede0be04d84312301162ee9424282a2ab37f03039216d4f848c" Nov 24 14:01:06 crc kubenswrapper[5039]: I1124 14:01:06.183428 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29399881-khmls" Nov 24 14:02:20 crc kubenswrapper[5039]: I1124 14:02:20.101239 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:02:20 crc kubenswrapper[5039]: I1124 14:02:20.101945 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 14:02:41 crc kubenswrapper[5039]: I1124 14:02:41.308594 5039 generic.go:334] "Generic (PLEG): container finished" podID="a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d" containerID="14aa708a57729306105a393518cccb74152f935950252d1e3a4399ac6d5231c2" exitCode=0 Nov 24 14:02:41 crc kubenswrapper[5039]: I1124 14:02:41.309095 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" event={"ID":"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d","Type":"ContainerDied","Data":"14aa708a57729306105a393518cccb74152f935950252d1e3a4399ac6d5231c2"} Nov 24 14:02:42 crc kubenswrapper[5039]: I1124 14:02:42.747467 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:02:42 crc kubenswrapper[5039]: I1124 14:02:42.933824 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-telemetry-power-monitoring-combined-ca-bundle\") pod \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " Nov 24 14:02:42 crc kubenswrapper[5039]: I1124 14:02:42.934012 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ceilometer-ipmi-config-data-1\") pod \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " Nov 24 14:02:42 crc kubenswrapper[5039]: I1124 14:02:42.934060 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xc4bx\" (UniqueName: \"kubernetes.io/projected/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-kube-api-access-xc4bx\") pod \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " Nov 24 14:02:42 crc kubenswrapper[5039]: I1124 14:02:42.934232 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ssh-key\") pod \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " Nov 24 14:02:42 crc kubenswrapper[5039]: I1124 14:02:42.934299 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-inventory\") pod \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " Nov 24 14:02:42 crc kubenswrapper[5039]: I1124 14:02:42.935080 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ceilometer-ipmi-config-data-2\") pod \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " Nov 24 14:02:42 crc kubenswrapper[5039]: I1124 14:02:42.935142 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ceilometer-ipmi-config-data-0\") pod \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\" (UID: \"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d\") " Nov 24 14:02:42 crc kubenswrapper[5039]: I1124 14:02:42.939841 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-kube-api-access-xc4bx" (OuterVolumeSpecName: "kube-api-access-xc4bx") pod "a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d" (UID: "a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d"). InnerVolumeSpecName "kube-api-access-xc4bx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:02:42 crc kubenswrapper[5039]: I1124 14:02:42.940891 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d" (UID: "a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:02:42 crc kubenswrapper[5039]: I1124 14:02:42.965499 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-inventory" (OuterVolumeSpecName: "inventory") pod "a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d" (UID: "a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:02:42 crc kubenswrapper[5039]: I1124 14:02:42.967550 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ceilometer-ipmi-config-data-1" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-1") pod "a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d" (UID: "a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d"). InnerVolumeSpecName "ceilometer-ipmi-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:02:42 crc kubenswrapper[5039]: I1124 14:02:42.970498 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ceilometer-ipmi-config-data-0" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-0") pod "a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d" (UID: "a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d"). InnerVolumeSpecName "ceilometer-ipmi-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:02:42 crc kubenswrapper[5039]: I1124 14:02:42.972989 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ceilometer-ipmi-config-data-2" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-2") pod "a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d" (UID: "a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d"). InnerVolumeSpecName "ceilometer-ipmi-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:02:42 crc kubenswrapper[5039]: I1124 14:02:42.973603 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d" (UID: "a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.037585 5039 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.037660 5039 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ceilometer-ipmi-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.037671 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xc4bx\" (UniqueName: \"kubernetes.io/projected/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-kube-api-access-xc4bx\") on node \"crc\" DevicePath \"\"" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.037680 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.037689 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.037700 5039 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ceilometer-ipmi-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.037709 5039 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d-ceilometer-ipmi-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.329039 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" event={"ID":"a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d","Type":"ContainerDied","Data":"adaa3a0a0d95603c4b5e6d40ed2a4d77eb822e0847ff11eb416c4694d454b218"} Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.329081 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="adaa3a0a0d95603c4b5e6d40ed2a4d77eb822e0847ff11eb416c4694d454b218" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.329099 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.437923 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh"] Nov 24 14:02:43 crc kubenswrapper[5039]: E1124 14:02:43.438654 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5589d33f-8cad-4a38-ae7d-f9611bb8efc5" containerName="keystone-cron" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.438679 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="5589d33f-8cad-4a38-ae7d-f9611bb8efc5" containerName="keystone-cron" Nov 24 14:02:43 crc kubenswrapper[5039]: E1124 14:02:43.438735 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.438745 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.439019 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="5589d33f-8cad-4a38-ae7d-f9611bb8efc5" containerName="keystone-cron" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.439069 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.440039 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.442427 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.442430 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"logging-compute-config-data" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.442812 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.442953 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.443268 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.448274 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh"] Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.545815 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-nnvjh\" (UID: \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.546064 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"inventory\" (UniqueName: \"kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-nnvjh\" (UID: \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.546419 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78nhk\" (UniqueName: \"kubernetes.io/projected/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-kube-api-access-78nhk\") pod \"logging-edpm-deployment-openstack-edpm-ipam-nnvjh\" (UID: \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.546458 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-ssh-key\") pod \"logging-edpm-deployment-openstack-edpm-ipam-nnvjh\" (UID: \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.546530 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-nnvjh\" (UID: \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.648644 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-nnvjh\" (UID: \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.649003 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78nhk\" (UniqueName: \"kubernetes.io/projected/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-kube-api-access-78nhk\") pod \"logging-edpm-deployment-openstack-edpm-ipam-nnvjh\" (UID: \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.649147 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-ssh-key\") pod \"logging-edpm-deployment-openstack-edpm-ipam-nnvjh\" (UID: \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.649306 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-nnvjh\" (UID: \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.649534 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-nnvjh\" (UID: \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.654692 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-nnvjh\" (UID: \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.658877 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-ssh-key\") pod \"logging-edpm-deployment-openstack-edpm-ipam-nnvjh\" (UID: \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.658968 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-nnvjh\" (UID: \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.663765 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-nnvjh\" (UID: \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.665243 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78nhk\" (UniqueName: \"kubernetes.io/projected/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-kube-api-access-78nhk\") pod \"logging-edpm-deployment-openstack-edpm-ipam-nnvjh\" (UID: \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" Nov 24 14:02:43 crc kubenswrapper[5039]: I1124 14:02:43.759057 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" Nov 24 14:02:44 crc kubenswrapper[5039]: I1124 14:02:44.289382 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh"] Nov 24 14:02:44 crc kubenswrapper[5039]: I1124 14:02:44.363431 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" event={"ID":"8b4db333-74a9-4093-bbc4-51bd0e48d5d1","Type":"ContainerStarted","Data":"55ef78fe2c0d84925582544b241277dc125a9b39e8ab3137ea5f3323859827c3"} Nov 24 14:02:45 crc kubenswrapper[5039]: I1124 14:02:45.375708 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" event={"ID":"8b4db333-74a9-4093-bbc4-51bd0e48d5d1","Type":"ContainerStarted","Data":"e2e911ea32c407d7f6341fff73e4571e663d0b9320fa23b9b4ac7c7475e7695b"} Nov 24 14:02:45 crc kubenswrapper[5039]: I1124 14:02:45.404699 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" podStartSLOduration=1.935392691 podStartE2EDuration="2.404680147s" podCreationTimestamp="2025-11-24 14:02:43 +0000 UTC" firstStartedPulling="2025-11-24 14:02:44.293484217 +0000 UTC m=+2676.732608717" lastFinishedPulling="2025-11-24 14:02:44.762771673 +0000 UTC m=+2677.201896173" observedRunningTime="2025-11-24 14:02:45.391373879 +0000 UTC m=+2677.830498389" watchObservedRunningTime="2025-11-24 14:02:45.404680147 +0000 UTC m=+2677.843804657" Nov 24 14:02:50 crc kubenswrapper[5039]: I1124 14:02:50.101226 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:02:50 crc kubenswrapper[5039]: I1124 14:02:50.101867 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 14:03:01 crc kubenswrapper[5039]: I1124 14:03:01.751022 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5jwbj"] Nov 24 14:03:01 crc kubenswrapper[5039]: I1124 14:03:01.758031 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5jwbj" Nov 24 14:03:01 crc kubenswrapper[5039]: I1124 14:03:01.766322 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5jwbj"] Nov 24 14:03:01 crc kubenswrapper[5039]: I1124 14:03:01.848874 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b59a109-9c9a-46da-867f-3d0be9a46b17-utilities\") pod \"community-operators-5jwbj\" (UID: \"0b59a109-9c9a-46da-867f-3d0be9a46b17\") " pod="openshift-marketplace/community-operators-5jwbj" Nov 24 14:03:01 crc kubenswrapper[5039]: I1124 14:03:01.849051 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lt4n4\" (UniqueName: \"kubernetes.io/projected/0b59a109-9c9a-46da-867f-3d0be9a46b17-kube-api-access-lt4n4\") pod \"community-operators-5jwbj\" (UID: \"0b59a109-9c9a-46da-867f-3d0be9a46b17\") " pod="openshift-marketplace/community-operators-5jwbj" Nov 24 14:03:01 crc kubenswrapper[5039]: I1124 14:03:01.849098 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b59a109-9c9a-46da-867f-3d0be9a46b17-catalog-content\") pod \"community-operators-5jwbj\" (UID: \"0b59a109-9c9a-46da-867f-3d0be9a46b17\") " pod="openshift-marketplace/community-operators-5jwbj" Nov 24 14:03:01 crc kubenswrapper[5039]: I1124 14:03:01.951477 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b59a109-9c9a-46da-867f-3d0be9a46b17-utilities\") pod \"community-operators-5jwbj\" (UID: \"0b59a109-9c9a-46da-867f-3d0be9a46b17\") " pod="openshift-marketplace/community-operators-5jwbj" Nov 24 14:03:01 crc kubenswrapper[5039]: I1124 14:03:01.951985 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lt4n4\" (UniqueName: \"kubernetes.io/projected/0b59a109-9c9a-46da-867f-3d0be9a46b17-kube-api-access-lt4n4\") pod \"community-operators-5jwbj\" (UID: \"0b59a109-9c9a-46da-867f-3d0be9a46b17\") " pod="openshift-marketplace/community-operators-5jwbj" Nov 24 14:03:01 crc kubenswrapper[5039]: I1124 14:03:01.952044 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b59a109-9c9a-46da-867f-3d0be9a46b17-catalog-content\") pod \"community-operators-5jwbj\" (UID: \"0b59a109-9c9a-46da-867f-3d0be9a46b17\") " pod="openshift-marketplace/community-operators-5jwbj" Nov 24 14:03:01 crc kubenswrapper[5039]: I1124 14:03:01.952075 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b59a109-9c9a-46da-867f-3d0be9a46b17-utilities\") pod \"community-operators-5jwbj\" (UID: \"0b59a109-9c9a-46da-867f-3d0be9a46b17\") " pod="openshift-marketplace/community-operators-5jwbj" Nov 24 14:03:01 crc kubenswrapper[5039]: I1124 14:03:01.952456 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b59a109-9c9a-46da-867f-3d0be9a46b17-catalog-content\") pod \"community-operators-5jwbj\" (UID: \"0b59a109-9c9a-46da-867f-3d0be9a46b17\") " pod="openshift-marketplace/community-operators-5jwbj" Nov 24 14:03:01 crc kubenswrapper[5039]: I1124 14:03:01.974243 5039 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-lt4n4\" (UniqueName: \"kubernetes.io/projected/0b59a109-9c9a-46da-867f-3d0be9a46b17-kube-api-access-lt4n4\") pod \"community-operators-5jwbj\" (UID: \"0b59a109-9c9a-46da-867f-3d0be9a46b17\") " pod="openshift-marketplace/community-operators-5jwbj" Nov 24 14:03:02 crc kubenswrapper[5039]: I1124 14:03:02.095031 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5jwbj" Nov 24 14:03:02 crc kubenswrapper[5039]: W1124 14:03:02.605003 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b59a109_9c9a_46da_867f_3d0be9a46b17.slice/crio-5f906192541b289c06d81665c00afcc033034cd899a9f577d074463dc071f868 WatchSource:0}: Error finding container 5f906192541b289c06d81665c00afcc033034cd899a9f577d074463dc071f868: Status 404 returned error can't find the container with id 5f906192541b289c06d81665c00afcc033034cd899a9f577d074463dc071f868 Nov 24 14:03:02 crc kubenswrapper[5039]: I1124 14:03:02.605163 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5jwbj"] Nov 24 14:03:03 crc kubenswrapper[5039]: I1124 14:03:03.600211 5039 generic.go:334] "Generic (PLEG): container finished" podID="0b59a109-9c9a-46da-867f-3d0be9a46b17" containerID="d000e5e9218a5f3d75569d0219a9a21963bc9dc32573e58133a6178ae6dea1b7" exitCode=0 Nov 24 14:03:03 crc kubenswrapper[5039]: I1124 14:03:03.603491 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5jwbj" event={"ID":"0b59a109-9c9a-46da-867f-3d0be9a46b17","Type":"ContainerDied","Data":"d000e5e9218a5f3d75569d0219a9a21963bc9dc32573e58133a6178ae6dea1b7"} Nov 24 14:03:03 crc kubenswrapper[5039]: I1124 14:03:03.603705 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5jwbj" event={"ID":"0b59a109-9c9a-46da-867f-3d0be9a46b17","Type":"ContainerStarted","Data":"5f906192541b289c06d81665c00afcc033034cd899a9f577d074463dc071f868"} Nov 24 14:03:03 crc kubenswrapper[5039]: I1124 14:03:03.605393 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" event={"ID":"8b4db333-74a9-4093-bbc4-51bd0e48d5d1","Type":"ContainerDied","Data":"e2e911ea32c407d7f6341fff73e4571e663d0b9320fa23b9b4ac7c7475e7695b"} Nov 24 14:03:03 crc kubenswrapper[5039]: I1124 14:03:03.605540 5039 generic.go:334] "Generic (PLEG): container finished" podID="8b4db333-74a9-4093-bbc4-51bd0e48d5d1" containerID="e2e911ea32c407d7f6341fff73e4571e663d0b9320fa23b9b4ac7c7475e7695b" exitCode=0 Nov 24 14:03:04 crc kubenswrapper[5039]: I1124 14:03:04.622754 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5jwbj" event={"ID":"0b59a109-9c9a-46da-867f-3d0be9a46b17","Type":"ContainerStarted","Data":"077baeec8af24e1b5484a35b76118ffababcf1b80a2c6aea820022eada481761"} Nov 24 14:03:05 crc kubenswrapper[5039]: I1124 14:03:05.108894 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" Nov 24 14:03:05 crc kubenswrapper[5039]: I1124 14:03:05.226979 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-logging-compute-config-data-0\") pod \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\" (UID: \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\") " Nov 24 14:03:05 crc kubenswrapper[5039]: I1124 14:03:05.227028 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-ssh-key\") pod \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\" (UID: \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\") " Nov 24 14:03:05 crc kubenswrapper[5039]: I1124 14:03:05.227217 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-inventory\") pod \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\" (UID: \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\") " Nov 24 14:03:05 crc kubenswrapper[5039]: I1124 14:03:05.227277 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-logging-compute-config-data-1\") pod \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\" (UID: \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\") " Nov 24 14:03:05 crc kubenswrapper[5039]: I1124 14:03:05.227309 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78nhk\" (UniqueName: \"kubernetes.io/projected/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-kube-api-access-78nhk\") pod \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\" (UID: \"8b4db333-74a9-4093-bbc4-51bd0e48d5d1\") " Nov 24 14:03:05 crc kubenswrapper[5039]: I1124 14:03:05.234038 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-kube-api-access-78nhk" (OuterVolumeSpecName: "kube-api-access-78nhk") pod "8b4db333-74a9-4093-bbc4-51bd0e48d5d1" (UID: "8b4db333-74a9-4093-bbc4-51bd0e48d5d1"). InnerVolumeSpecName "kube-api-access-78nhk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:03:05 crc kubenswrapper[5039]: I1124 14:03:05.258226 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-logging-compute-config-data-1" (OuterVolumeSpecName: "logging-compute-config-data-1") pod "8b4db333-74a9-4093-bbc4-51bd0e48d5d1" (UID: "8b4db333-74a9-4093-bbc4-51bd0e48d5d1"). InnerVolumeSpecName "logging-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:03:05 crc kubenswrapper[5039]: I1124 14:03:05.310215 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8b4db333-74a9-4093-bbc4-51bd0e48d5d1" (UID: "8b4db333-74a9-4093-bbc4-51bd0e48d5d1"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:03:05 crc kubenswrapper[5039]: I1124 14:03:05.311035 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-logging-compute-config-data-0" (OuterVolumeSpecName: "logging-compute-config-data-0") pod "8b4db333-74a9-4093-bbc4-51bd0e48d5d1" (UID: "8b4db333-74a9-4093-bbc4-51bd0e48d5d1"). InnerVolumeSpecName "logging-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:03:05 crc kubenswrapper[5039]: I1124 14:03:05.315692 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-inventory" (OuterVolumeSpecName: "inventory") pod "8b4db333-74a9-4093-bbc4-51bd0e48d5d1" (UID: "8b4db333-74a9-4093-bbc4-51bd0e48d5d1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:03:05 crc kubenswrapper[5039]: I1124 14:03:05.330692 5039 reconciler_common.go:293] "Volume detached for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-logging-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:03:05 crc kubenswrapper[5039]: I1124 14:03:05.330734 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 14:03:05 crc kubenswrapper[5039]: I1124 14:03:05.330748 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 14:03:05 crc kubenswrapper[5039]: I1124 14:03:05.330760 5039 reconciler_common.go:293] "Volume detached for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-logging-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 24 14:03:05 crc kubenswrapper[5039]: I1124 14:03:05.330774 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78nhk\" (UniqueName: \"kubernetes.io/projected/8b4db333-74a9-4093-bbc4-51bd0e48d5d1-kube-api-access-78nhk\") on node \"crc\" DevicePath \"\"" Nov 24 14:03:05 crc kubenswrapper[5039]: I1124 14:03:05.634978 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" Nov 24 14:03:05 crc kubenswrapper[5039]: I1124 14:03:05.634973 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh" event={"ID":"8b4db333-74a9-4093-bbc4-51bd0e48d5d1","Type":"ContainerDied","Data":"55ef78fe2c0d84925582544b241277dc125a9b39e8ab3137ea5f3323859827c3"} Nov 24 14:03:05 crc kubenswrapper[5039]: I1124 14:03:05.635036 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="55ef78fe2c0d84925582544b241277dc125a9b39e8ab3137ea5f3323859827c3" Nov 24 14:03:06 crc kubenswrapper[5039]: I1124 14:03:06.648115 5039 generic.go:334] "Generic (PLEG): container finished" podID="0b59a109-9c9a-46da-867f-3d0be9a46b17" containerID="077baeec8af24e1b5484a35b76118ffababcf1b80a2c6aea820022eada481761" exitCode=0 Nov 24 14:03:06 crc kubenswrapper[5039]: I1124 14:03:06.648192 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5jwbj" event={"ID":"0b59a109-9c9a-46da-867f-3d0be9a46b17","Type":"ContainerDied","Data":"077baeec8af24e1b5484a35b76118ffababcf1b80a2c6aea820022eada481761"} Nov 24 14:03:07 crc kubenswrapper[5039]: I1124 14:03:07.663942 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5jwbj" event={"ID":"0b59a109-9c9a-46da-867f-3d0be9a46b17","Type":"ContainerStarted","Data":"c869c95e16616fd6f145ae15109799ffab4707c2bf67f010dda450b0d7d5411c"} Nov 24 14:03:07 crc kubenswrapper[5039]: I1124 14:03:07.696669 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5jwbj" podStartSLOduration=3.029854786 podStartE2EDuration="6.696652477s" podCreationTimestamp="2025-11-24 14:03:01 +0000 UTC" firstStartedPulling="2025-11-24 14:03:03.604598355 +0000 UTC m=+2696.043722855" lastFinishedPulling="2025-11-24 14:03:07.271396046 +0000 UTC m=+2699.710520546" observedRunningTime="2025-11-24 14:03:07.689766617 +0000 UTC m=+2700.128891127" watchObservedRunningTime="2025-11-24 14:03:07.696652477 +0000 UTC m=+2700.135776977" Nov 24 14:03:12 crc kubenswrapper[5039]: I1124 14:03:12.095273 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5jwbj" Nov 24 14:03:12 crc kubenswrapper[5039]: I1124 14:03:12.095640 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5jwbj" Nov 24 14:03:12 crc kubenswrapper[5039]: I1124 14:03:12.187884 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5jwbj" Nov 24 14:03:12 crc kubenswrapper[5039]: I1124 14:03:12.803677 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5jwbj" Nov 24 14:03:12 crc kubenswrapper[5039]: I1124 14:03:12.882048 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5jwbj"] Nov 24 14:03:14 crc kubenswrapper[5039]: I1124 14:03:14.742617 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5jwbj" podUID="0b59a109-9c9a-46da-867f-3d0be9a46b17" containerName="registry-server" containerID="cri-o://c869c95e16616fd6f145ae15109799ffab4707c2bf67f010dda450b0d7d5411c" gracePeriod=2 Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.343053 5039 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5jwbj" Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.477674 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lt4n4\" (UniqueName: \"kubernetes.io/projected/0b59a109-9c9a-46da-867f-3d0be9a46b17-kube-api-access-lt4n4\") pod \"0b59a109-9c9a-46da-867f-3d0be9a46b17\" (UID: \"0b59a109-9c9a-46da-867f-3d0be9a46b17\") " Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.477805 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b59a109-9c9a-46da-867f-3d0be9a46b17-catalog-content\") pod \"0b59a109-9c9a-46da-867f-3d0be9a46b17\" (UID: \"0b59a109-9c9a-46da-867f-3d0be9a46b17\") " Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.477885 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b59a109-9c9a-46da-867f-3d0be9a46b17-utilities\") pod \"0b59a109-9c9a-46da-867f-3d0be9a46b17\" (UID: \"0b59a109-9c9a-46da-867f-3d0be9a46b17\") " Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.478391 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b59a109-9c9a-46da-867f-3d0be9a46b17-utilities" (OuterVolumeSpecName: "utilities") pod "0b59a109-9c9a-46da-867f-3d0be9a46b17" (UID: "0b59a109-9c9a-46da-867f-3d0be9a46b17"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.478786 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b59a109-9c9a-46da-867f-3d0be9a46b17-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.484981 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b59a109-9c9a-46da-867f-3d0be9a46b17-kube-api-access-lt4n4" (OuterVolumeSpecName: "kube-api-access-lt4n4") pod "0b59a109-9c9a-46da-867f-3d0be9a46b17" (UID: "0b59a109-9c9a-46da-867f-3d0be9a46b17"). InnerVolumeSpecName "kube-api-access-lt4n4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.545125 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b59a109-9c9a-46da-867f-3d0be9a46b17-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0b59a109-9c9a-46da-867f-3d0be9a46b17" (UID: "0b59a109-9c9a-46da-867f-3d0be9a46b17"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.581165 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lt4n4\" (UniqueName: \"kubernetes.io/projected/0b59a109-9c9a-46da-867f-3d0be9a46b17-kube-api-access-lt4n4\") on node \"crc\" DevicePath \"\"" Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.581212 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b59a109-9c9a-46da-867f-3d0be9a46b17-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.755861 5039 generic.go:334] "Generic (PLEG): container finished" podID="0b59a109-9c9a-46da-867f-3d0be9a46b17" containerID="c869c95e16616fd6f145ae15109799ffab4707c2bf67f010dda450b0d7d5411c" exitCode=0 Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.755988 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5jwbj" Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.755956 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5jwbj" event={"ID":"0b59a109-9c9a-46da-867f-3d0be9a46b17","Type":"ContainerDied","Data":"c869c95e16616fd6f145ae15109799ffab4707c2bf67f010dda450b0d7d5411c"} Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.756784 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5jwbj" event={"ID":"0b59a109-9c9a-46da-867f-3d0be9a46b17","Type":"ContainerDied","Data":"5f906192541b289c06d81665c00afcc033034cd899a9f577d074463dc071f868"} Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.756818 5039 scope.go:117] "RemoveContainer" containerID="c869c95e16616fd6f145ae15109799ffab4707c2bf67f010dda450b0d7d5411c" Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.797035 5039 scope.go:117] "RemoveContainer" containerID="077baeec8af24e1b5484a35b76118ffababcf1b80a2c6aea820022eada481761" Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.805480 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5jwbj"] Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.816207 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5jwbj"] Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.819672 5039 scope.go:117] "RemoveContainer" containerID="d000e5e9218a5f3d75569d0219a9a21963bc9dc32573e58133a6178ae6dea1b7" Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.877649 5039 scope.go:117] "RemoveContainer" containerID="c869c95e16616fd6f145ae15109799ffab4707c2bf67f010dda450b0d7d5411c" Nov 24 14:03:15 crc kubenswrapper[5039]: E1124 14:03:15.878073 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c869c95e16616fd6f145ae15109799ffab4707c2bf67f010dda450b0d7d5411c\": container with ID starting with c869c95e16616fd6f145ae15109799ffab4707c2bf67f010dda450b0d7d5411c not found: ID does not exist" containerID="c869c95e16616fd6f145ae15109799ffab4707c2bf67f010dda450b0d7d5411c" Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.878122 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c869c95e16616fd6f145ae15109799ffab4707c2bf67f010dda450b0d7d5411c"} err="failed to get container status 
\"c869c95e16616fd6f145ae15109799ffab4707c2bf67f010dda450b0d7d5411c\": rpc error: code = NotFound desc = could not find container \"c869c95e16616fd6f145ae15109799ffab4707c2bf67f010dda450b0d7d5411c\": container with ID starting with c869c95e16616fd6f145ae15109799ffab4707c2bf67f010dda450b0d7d5411c not found: ID does not exist" Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.878150 5039 scope.go:117] "RemoveContainer" containerID="077baeec8af24e1b5484a35b76118ffababcf1b80a2c6aea820022eada481761" Nov 24 14:03:15 crc kubenswrapper[5039]: E1124 14:03:15.878480 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"077baeec8af24e1b5484a35b76118ffababcf1b80a2c6aea820022eada481761\": container with ID starting with 077baeec8af24e1b5484a35b76118ffababcf1b80a2c6aea820022eada481761 not found: ID does not exist" containerID="077baeec8af24e1b5484a35b76118ffababcf1b80a2c6aea820022eada481761" Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.878514 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"077baeec8af24e1b5484a35b76118ffababcf1b80a2c6aea820022eada481761"} err="failed to get container status \"077baeec8af24e1b5484a35b76118ffababcf1b80a2c6aea820022eada481761\": rpc error: code = NotFound desc = could not find container \"077baeec8af24e1b5484a35b76118ffababcf1b80a2c6aea820022eada481761\": container with ID starting with 077baeec8af24e1b5484a35b76118ffababcf1b80a2c6aea820022eada481761 not found: ID does not exist" Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.878542 5039 scope.go:117] "RemoveContainer" containerID="d000e5e9218a5f3d75569d0219a9a21963bc9dc32573e58133a6178ae6dea1b7" Nov 24 14:03:15 crc kubenswrapper[5039]: E1124 14:03:15.878862 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d000e5e9218a5f3d75569d0219a9a21963bc9dc32573e58133a6178ae6dea1b7\": container with ID starting with d000e5e9218a5f3d75569d0219a9a21963bc9dc32573e58133a6178ae6dea1b7 not found: ID does not exist" containerID="d000e5e9218a5f3d75569d0219a9a21963bc9dc32573e58133a6178ae6dea1b7" Nov 24 14:03:15 crc kubenswrapper[5039]: I1124 14:03:15.878900 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d000e5e9218a5f3d75569d0219a9a21963bc9dc32573e58133a6178ae6dea1b7"} err="failed to get container status \"d000e5e9218a5f3d75569d0219a9a21963bc9dc32573e58133a6178ae6dea1b7\": rpc error: code = NotFound desc = could not find container \"d000e5e9218a5f3d75569d0219a9a21963bc9dc32573e58133a6178ae6dea1b7\": container with ID starting with d000e5e9218a5f3d75569d0219a9a21963bc9dc32573e58133a6178ae6dea1b7 not found: ID does not exist" Nov 24 14:03:16 crc kubenswrapper[5039]: I1124 14:03:16.321899 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b59a109-9c9a-46da-867f-3d0be9a46b17" path="/var/lib/kubelet/pods/0b59a109-9c9a-46da-867f-3d0be9a46b17/volumes" Nov 24 14:03:20 crc kubenswrapper[5039]: I1124 14:03:20.100894 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:03:20 crc kubenswrapper[5039]: I1124 14:03:20.101218 5039 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 14:03:20 crc kubenswrapper[5039]: I1124 14:03:20.101255 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 14:03:20 crc kubenswrapper[5039]: I1124 14:03:20.101849 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f4df9e2b4ae7b3d6fb4f3a538dbbb6b8373edbff29ee3cbb674417bf32f805d0"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 14:03:20 crc kubenswrapper[5039]: I1124 14:03:20.101898 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://f4df9e2b4ae7b3d6fb4f3a538dbbb6b8373edbff29ee3cbb674417bf32f805d0" gracePeriod=600 Nov 24 14:03:20 crc kubenswrapper[5039]: I1124 14:03:20.817364 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="f4df9e2b4ae7b3d6fb4f3a538dbbb6b8373edbff29ee3cbb674417bf32f805d0" exitCode=0 Nov 24 14:03:20 crc kubenswrapper[5039]: I1124 14:03:20.817422 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"f4df9e2b4ae7b3d6fb4f3a538dbbb6b8373edbff29ee3cbb674417bf32f805d0"} Nov 24 14:03:20 crc kubenswrapper[5039]: I1124 14:03:20.817741 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36"} Nov 24 14:03:20 crc kubenswrapper[5039]: I1124 14:03:20.817767 5039 scope.go:117] "RemoveContainer" containerID="2e6c8d1a5b68dbc1b99b70d78084a0cd3549adfc83d211a6a428b3ee1524a451" Nov 24 14:04:20 crc kubenswrapper[5039]: I1124 14:04:20.122126 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-92d4g"] Nov 24 14:04:20 crc kubenswrapper[5039]: E1124 14:04:20.123154 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b59a109-9c9a-46da-867f-3d0be9a46b17" containerName="extract-content" Nov 24 14:04:20 crc kubenswrapper[5039]: I1124 14:04:20.123171 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b59a109-9c9a-46da-867f-3d0be9a46b17" containerName="extract-content" Nov 24 14:04:20 crc kubenswrapper[5039]: E1124 14:04:20.123187 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b4db333-74a9-4093-bbc4-51bd0e48d5d1" containerName="logging-edpm-deployment-openstack-edpm-ipam" Nov 24 14:04:20 crc kubenswrapper[5039]: I1124 14:04:20.123198 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b4db333-74a9-4093-bbc4-51bd0e48d5d1" containerName="logging-edpm-deployment-openstack-edpm-ipam" Nov 24 14:04:20 crc kubenswrapper[5039]: E1124 14:04:20.123220 5039 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="0b59a109-9c9a-46da-867f-3d0be9a46b17" containerName="extract-utilities" Nov 24 14:04:20 crc kubenswrapper[5039]: I1124 14:04:20.123229 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b59a109-9c9a-46da-867f-3d0be9a46b17" containerName="extract-utilities" Nov 24 14:04:20 crc kubenswrapper[5039]: E1124 14:04:20.123273 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b59a109-9c9a-46da-867f-3d0be9a46b17" containerName="registry-server" Nov 24 14:04:20 crc kubenswrapper[5039]: I1124 14:04:20.123288 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b59a109-9c9a-46da-867f-3d0be9a46b17" containerName="registry-server" Nov 24 14:04:20 crc kubenswrapper[5039]: I1124 14:04:20.123637 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b59a109-9c9a-46da-867f-3d0be9a46b17" containerName="registry-server" Nov 24 14:04:20 crc kubenswrapper[5039]: I1124 14:04:20.123676 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b4db333-74a9-4093-bbc4-51bd0e48d5d1" containerName="logging-edpm-deployment-openstack-edpm-ipam" Nov 24 14:04:20 crc kubenswrapper[5039]: I1124 14:04:20.125611 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-92d4g" Nov 24 14:04:20 crc kubenswrapper[5039]: I1124 14:04:20.139142 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-92d4g"] Nov 24 14:04:20 crc kubenswrapper[5039]: I1124 14:04:20.281471 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09894d54-c8e4-4ba8-acf2-e3e47bd32e31-utilities\") pod \"certified-operators-92d4g\" (UID: \"09894d54-c8e4-4ba8-acf2-e3e47bd32e31\") " pod="openshift-marketplace/certified-operators-92d4g" Nov 24 14:04:20 crc kubenswrapper[5039]: I1124 14:04:20.281593 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mk7rk\" (UniqueName: \"kubernetes.io/projected/09894d54-c8e4-4ba8-acf2-e3e47bd32e31-kube-api-access-mk7rk\") pod \"certified-operators-92d4g\" (UID: \"09894d54-c8e4-4ba8-acf2-e3e47bd32e31\") " pod="openshift-marketplace/certified-operators-92d4g" Nov 24 14:04:20 crc kubenswrapper[5039]: I1124 14:04:20.281648 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09894d54-c8e4-4ba8-acf2-e3e47bd32e31-catalog-content\") pod \"certified-operators-92d4g\" (UID: \"09894d54-c8e4-4ba8-acf2-e3e47bd32e31\") " pod="openshift-marketplace/certified-operators-92d4g" Nov 24 14:04:20 crc kubenswrapper[5039]: I1124 14:04:20.383225 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09894d54-c8e4-4ba8-acf2-e3e47bd32e31-utilities\") pod \"certified-operators-92d4g\" (UID: \"09894d54-c8e4-4ba8-acf2-e3e47bd32e31\") " pod="openshift-marketplace/certified-operators-92d4g" Nov 24 14:04:20 crc kubenswrapper[5039]: I1124 14:04:20.383740 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mk7rk\" (UniqueName: \"kubernetes.io/projected/09894d54-c8e4-4ba8-acf2-e3e47bd32e31-kube-api-access-mk7rk\") pod \"certified-operators-92d4g\" (UID: \"09894d54-c8e4-4ba8-acf2-e3e47bd32e31\") " pod="openshift-marketplace/certified-operators-92d4g" Nov 24 14:04:20 crc 
kubenswrapper[5039]: I1124 14:04:20.383838 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09894d54-c8e4-4ba8-acf2-e3e47bd32e31-catalog-content\") pod \"certified-operators-92d4g\" (UID: \"09894d54-c8e4-4ba8-acf2-e3e47bd32e31\") " pod="openshift-marketplace/certified-operators-92d4g" Nov 24 14:04:20 crc kubenswrapper[5039]: I1124 14:04:20.383895 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09894d54-c8e4-4ba8-acf2-e3e47bd32e31-utilities\") pod \"certified-operators-92d4g\" (UID: \"09894d54-c8e4-4ba8-acf2-e3e47bd32e31\") " pod="openshift-marketplace/certified-operators-92d4g" Nov 24 14:04:20 crc kubenswrapper[5039]: I1124 14:04:20.384250 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09894d54-c8e4-4ba8-acf2-e3e47bd32e31-catalog-content\") pod \"certified-operators-92d4g\" (UID: \"09894d54-c8e4-4ba8-acf2-e3e47bd32e31\") " pod="openshift-marketplace/certified-operators-92d4g" Nov 24 14:04:20 crc kubenswrapper[5039]: I1124 14:04:20.406497 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mk7rk\" (UniqueName: \"kubernetes.io/projected/09894d54-c8e4-4ba8-acf2-e3e47bd32e31-kube-api-access-mk7rk\") pod \"certified-operators-92d4g\" (UID: \"09894d54-c8e4-4ba8-acf2-e3e47bd32e31\") " pod="openshift-marketplace/certified-operators-92d4g" Nov 24 14:04:20 crc kubenswrapper[5039]: I1124 14:04:20.466543 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-92d4g" Nov 24 14:04:21 crc kubenswrapper[5039]: I1124 14:04:21.024906 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-92d4g"] Nov 24 14:04:21 crc kubenswrapper[5039]: I1124 14:04:21.532750 5039 generic.go:334] "Generic (PLEG): container finished" podID="09894d54-c8e4-4ba8-acf2-e3e47bd32e31" containerID="79dd325bc13f3e640c23a077707e774d015edccd45de0ac57add37160b5eee65" exitCode=0 Nov 24 14:04:21 crc kubenswrapper[5039]: I1124 14:04:21.532930 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-92d4g" event={"ID":"09894d54-c8e4-4ba8-acf2-e3e47bd32e31","Type":"ContainerDied","Data":"79dd325bc13f3e640c23a077707e774d015edccd45de0ac57add37160b5eee65"} Nov 24 14:04:21 crc kubenswrapper[5039]: I1124 14:04:21.533789 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-92d4g" event={"ID":"09894d54-c8e4-4ba8-acf2-e3e47bd32e31","Type":"ContainerStarted","Data":"668af1aca5b74bb5db78bcd23eb2237c0a1434483cee4b7e5c1055cbf8e7f595"} Nov 24 14:04:21 crc kubenswrapper[5039]: I1124 14:04:21.535177 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 14:04:22 crc kubenswrapper[5039]: I1124 14:04:22.548017 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-92d4g" event={"ID":"09894d54-c8e4-4ba8-acf2-e3e47bd32e31","Type":"ContainerStarted","Data":"7ec5dd5f769f198dcd8a2e983c05fe8001b1b142f538a98d30381e5abe4274fb"} Nov 24 14:04:24 crc kubenswrapper[5039]: I1124 14:04:24.578872 5039 generic.go:334] "Generic (PLEG): container finished" podID="09894d54-c8e4-4ba8-acf2-e3e47bd32e31" containerID="7ec5dd5f769f198dcd8a2e983c05fe8001b1b142f538a98d30381e5abe4274fb" exitCode=0 
Nov 24 14:04:24 crc kubenswrapper[5039]: I1124 14:04:24.579093 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-92d4g" event={"ID":"09894d54-c8e4-4ba8-acf2-e3e47bd32e31","Type":"ContainerDied","Data":"7ec5dd5f769f198dcd8a2e983c05fe8001b1b142f538a98d30381e5abe4274fb"} Nov 24 14:04:25 crc kubenswrapper[5039]: I1124 14:04:25.592008 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-92d4g" event={"ID":"09894d54-c8e4-4ba8-acf2-e3e47bd32e31","Type":"ContainerStarted","Data":"ce62c4eda11b08031af35c2593297154a456326f214d3ab639bb6af621a875fc"} Nov 24 14:04:25 crc kubenswrapper[5039]: I1124 14:04:25.620475 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-92d4g" podStartSLOduration=2.034982273 podStartE2EDuration="5.620450328s" podCreationTimestamp="2025-11-24 14:04:20 +0000 UTC" firstStartedPulling="2025-11-24 14:04:21.534910488 +0000 UTC m=+2773.974035008" lastFinishedPulling="2025-11-24 14:04:25.120378533 +0000 UTC m=+2777.559503063" observedRunningTime="2025-11-24 14:04:25.607432208 +0000 UTC m=+2778.046556718" watchObservedRunningTime="2025-11-24 14:04:25.620450328 +0000 UTC m=+2778.059574848" Nov 24 14:04:29 crc kubenswrapper[5039]: I1124 14:04:29.417141 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6rgc8"] Nov 24 14:04:29 crc kubenswrapper[5039]: I1124 14:04:29.421871 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6rgc8" Nov 24 14:04:29 crc kubenswrapper[5039]: I1124 14:04:29.430027 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6rgc8"] Nov 24 14:04:29 crc kubenswrapper[5039]: I1124 14:04:29.486190 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352-catalog-content\") pod \"redhat-marketplace-6rgc8\" (UID: \"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352\") " pod="openshift-marketplace/redhat-marketplace-6rgc8" Nov 24 14:04:29 crc kubenswrapper[5039]: I1124 14:04:29.486537 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352-utilities\") pod \"redhat-marketplace-6rgc8\" (UID: \"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352\") " pod="openshift-marketplace/redhat-marketplace-6rgc8" Nov 24 14:04:29 crc kubenswrapper[5039]: I1124 14:04:29.486636 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rj6zx\" (UniqueName: \"kubernetes.io/projected/d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352-kube-api-access-rj6zx\") pod \"redhat-marketplace-6rgc8\" (UID: \"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352\") " pod="openshift-marketplace/redhat-marketplace-6rgc8" Nov 24 14:04:29 crc kubenswrapper[5039]: I1124 14:04:29.588691 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352-catalog-content\") pod \"redhat-marketplace-6rgc8\" (UID: \"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352\") " pod="openshift-marketplace/redhat-marketplace-6rgc8" Nov 24 14:04:29 crc kubenswrapper[5039]: I1124 14:04:29.588764 5039 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352-utilities\") pod \"redhat-marketplace-6rgc8\" (UID: \"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352\") " pod="openshift-marketplace/redhat-marketplace-6rgc8" Nov 24 14:04:29 crc kubenswrapper[5039]: I1124 14:04:29.588834 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rj6zx\" (UniqueName: \"kubernetes.io/projected/d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352-kube-api-access-rj6zx\") pod \"redhat-marketplace-6rgc8\" (UID: \"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352\") " pod="openshift-marketplace/redhat-marketplace-6rgc8" Nov 24 14:04:29 crc kubenswrapper[5039]: I1124 14:04:29.589649 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352-catalog-content\") pod \"redhat-marketplace-6rgc8\" (UID: \"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352\") " pod="openshift-marketplace/redhat-marketplace-6rgc8" Nov 24 14:04:29 crc kubenswrapper[5039]: I1124 14:04:29.589924 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352-utilities\") pod \"redhat-marketplace-6rgc8\" (UID: \"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352\") " pod="openshift-marketplace/redhat-marketplace-6rgc8" Nov 24 14:04:29 crc kubenswrapper[5039]: I1124 14:04:29.611997 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rj6zx\" (UniqueName: \"kubernetes.io/projected/d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352-kube-api-access-rj6zx\") pod \"redhat-marketplace-6rgc8\" (UID: \"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352\") " pod="openshift-marketplace/redhat-marketplace-6rgc8" Nov 24 14:04:29 crc kubenswrapper[5039]: I1124 14:04:29.757343 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6rgc8" Nov 24 14:04:30 crc kubenswrapper[5039]: I1124 14:04:30.238026 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6rgc8"] Nov 24 14:04:30 crc kubenswrapper[5039]: W1124 14:04:30.239146 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd9d9bc3b_f9a1_4dbd_8756_cdb3bbd5e352.slice/crio-923dd4d6d7413e1c7512ae40325915a739b3a9ee61be7ff683982174b623a056 WatchSource:0}: Error finding container 923dd4d6d7413e1c7512ae40325915a739b3a9ee61be7ff683982174b623a056: Status 404 returned error can't find the container with id 923dd4d6d7413e1c7512ae40325915a739b3a9ee61be7ff683982174b623a056 Nov 24 14:04:30 crc kubenswrapper[5039]: I1124 14:04:30.468366 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-92d4g" Nov 24 14:04:30 crc kubenswrapper[5039]: I1124 14:04:30.468423 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-92d4g" Nov 24 14:04:30 crc kubenswrapper[5039]: I1124 14:04:30.537543 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-92d4g" Nov 24 14:04:30 crc kubenswrapper[5039]: I1124 14:04:30.643985 5039 generic.go:334] "Generic (PLEG): container finished" podID="d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352" containerID="0f96fa3161af3d22f4d1d26f20e77b0f4e66c1a4e8211fea8cd8163ecbe0c5d0" exitCode=0 Nov 24 14:04:30 crc kubenswrapper[5039]: I1124 14:04:30.644093 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6rgc8" event={"ID":"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352","Type":"ContainerDied","Data":"0f96fa3161af3d22f4d1d26f20e77b0f4e66c1a4e8211fea8cd8163ecbe0c5d0"} Nov 24 14:04:30 crc kubenswrapper[5039]: I1124 14:04:30.644140 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6rgc8" event={"ID":"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352","Type":"ContainerStarted","Data":"923dd4d6d7413e1c7512ae40325915a739b3a9ee61be7ff683982174b623a056"} Nov 24 14:04:30 crc kubenswrapper[5039]: I1124 14:04:30.692477 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-92d4g" Nov 24 14:04:32 crc kubenswrapper[5039]: I1124 14:04:32.668698 5039 generic.go:334] "Generic (PLEG): container finished" podID="d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352" containerID="ae77afa0b406e98167c67708362cab3978e34ea5a7a40fafdce57d2ea2576568" exitCode=0 Nov 24 14:04:32 crc kubenswrapper[5039]: I1124 14:04:32.668757 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6rgc8" event={"ID":"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352","Type":"ContainerDied","Data":"ae77afa0b406e98167c67708362cab3978e34ea5a7a40fafdce57d2ea2576568"} Nov 24 14:04:32 crc kubenswrapper[5039]: I1124 14:04:32.784838 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-92d4g"] Nov 24 14:04:32 crc kubenswrapper[5039]: I1124 14:04:32.785102 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-92d4g" podUID="09894d54-c8e4-4ba8-acf2-e3e47bd32e31" containerName="registry-server" containerID="cri-o://ce62c4eda11b08031af35c2593297154a456326f214d3ab639bb6af621a875fc" 
gracePeriod=2 Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.365798 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-92d4g" Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.495776 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mk7rk\" (UniqueName: \"kubernetes.io/projected/09894d54-c8e4-4ba8-acf2-e3e47bd32e31-kube-api-access-mk7rk\") pod \"09894d54-c8e4-4ba8-acf2-e3e47bd32e31\" (UID: \"09894d54-c8e4-4ba8-acf2-e3e47bd32e31\") " Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.495875 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09894d54-c8e4-4ba8-acf2-e3e47bd32e31-catalog-content\") pod \"09894d54-c8e4-4ba8-acf2-e3e47bd32e31\" (UID: \"09894d54-c8e4-4ba8-acf2-e3e47bd32e31\") " Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.496127 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09894d54-c8e4-4ba8-acf2-e3e47bd32e31-utilities\") pod \"09894d54-c8e4-4ba8-acf2-e3e47bd32e31\" (UID: \"09894d54-c8e4-4ba8-acf2-e3e47bd32e31\") " Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.496977 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09894d54-c8e4-4ba8-acf2-e3e47bd32e31-utilities" (OuterVolumeSpecName: "utilities") pod "09894d54-c8e4-4ba8-acf2-e3e47bd32e31" (UID: "09894d54-c8e4-4ba8-acf2-e3e47bd32e31"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.516637 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09894d54-c8e4-4ba8-acf2-e3e47bd32e31-kube-api-access-mk7rk" (OuterVolumeSpecName: "kube-api-access-mk7rk") pod "09894d54-c8e4-4ba8-acf2-e3e47bd32e31" (UID: "09894d54-c8e4-4ba8-acf2-e3e47bd32e31"). InnerVolumeSpecName "kube-api-access-mk7rk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.536958 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09894d54-c8e4-4ba8-acf2-e3e47bd32e31-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "09894d54-c8e4-4ba8-acf2-e3e47bd32e31" (UID: "09894d54-c8e4-4ba8-acf2-e3e47bd32e31"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.598904 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09894d54-c8e4-4ba8-acf2-e3e47bd32e31-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.598935 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09894d54-c8e4-4ba8-acf2-e3e47bd32e31-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.598948 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mk7rk\" (UniqueName: \"kubernetes.io/projected/09894d54-c8e4-4ba8-acf2-e3e47bd32e31-kube-api-access-mk7rk\") on node \"crc\" DevicePath \"\"" Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.678936 5039 generic.go:334] "Generic (PLEG): container finished" podID="09894d54-c8e4-4ba8-acf2-e3e47bd32e31" containerID="ce62c4eda11b08031af35c2593297154a456326f214d3ab639bb6af621a875fc" exitCode=0 Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.679002 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-92d4g" Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.678985 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-92d4g" event={"ID":"09894d54-c8e4-4ba8-acf2-e3e47bd32e31","Type":"ContainerDied","Data":"ce62c4eda11b08031af35c2593297154a456326f214d3ab639bb6af621a875fc"} Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.679149 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-92d4g" event={"ID":"09894d54-c8e4-4ba8-acf2-e3e47bd32e31","Type":"ContainerDied","Data":"668af1aca5b74bb5db78bcd23eb2237c0a1434483cee4b7e5c1055cbf8e7f595"} Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.679170 5039 scope.go:117] "RemoveContainer" containerID="ce62c4eda11b08031af35c2593297154a456326f214d3ab639bb6af621a875fc" Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.682794 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6rgc8" event={"ID":"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352","Type":"ContainerStarted","Data":"8adc3ad4f7b46f957643a663b9e8d3b4c261a9cf0bdf0611db74d6c96b281c6a"} Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.705948 5039 scope.go:117] "RemoveContainer" containerID="7ec5dd5f769f198dcd8a2e983c05fe8001b1b142f538a98d30381e5abe4274fb" Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.710387 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6rgc8" podStartSLOduration=2.246788456 podStartE2EDuration="4.710369098s" podCreationTimestamp="2025-11-24 14:04:29 +0000 UTC" firstStartedPulling="2025-11-24 14:04:30.645539835 +0000 UTC m=+2783.084664335" lastFinishedPulling="2025-11-24 14:04:33.109120477 +0000 UTC m=+2785.548244977" observedRunningTime="2025-11-24 14:04:33.701653373 +0000 UTC m=+2786.140777873" watchObservedRunningTime="2025-11-24 14:04:33.710369098 +0000 UTC m=+2786.149493598" Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.724736 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-92d4g"] Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.734752 5039 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openshift-marketplace/certified-operators-92d4g"] Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.750413 5039 scope.go:117] "RemoveContainer" containerID="79dd325bc13f3e640c23a077707e774d015edccd45de0ac57add37160b5eee65" Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.773636 5039 scope.go:117] "RemoveContainer" containerID="ce62c4eda11b08031af35c2593297154a456326f214d3ab639bb6af621a875fc" Nov 24 14:04:33 crc kubenswrapper[5039]: E1124 14:04:33.774201 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce62c4eda11b08031af35c2593297154a456326f214d3ab639bb6af621a875fc\": container with ID starting with ce62c4eda11b08031af35c2593297154a456326f214d3ab639bb6af621a875fc not found: ID does not exist" containerID="ce62c4eda11b08031af35c2593297154a456326f214d3ab639bb6af621a875fc" Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.774249 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce62c4eda11b08031af35c2593297154a456326f214d3ab639bb6af621a875fc"} err="failed to get container status \"ce62c4eda11b08031af35c2593297154a456326f214d3ab639bb6af621a875fc\": rpc error: code = NotFound desc = could not find container \"ce62c4eda11b08031af35c2593297154a456326f214d3ab639bb6af621a875fc\": container with ID starting with ce62c4eda11b08031af35c2593297154a456326f214d3ab639bb6af621a875fc not found: ID does not exist" Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.774283 5039 scope.go:117] "RemoveContainer" containerID="7ec5dd5f769f198dcd8a2e983c05fe8001b1b142f538a98d30381e5abe4274fb" Nov 24 14:04:33 crc kubenswrapper[5039]: E1124 14:04:33.775657 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ec5dd5f769f198dcd8a2e983c05fe8001b1b142f538a98d30381e5abe4274fb\": container with ID starting with 7ec5dd5f769f198dcd8a2e983c05fe8001b1b142f538a98d30381e5abe4274fb not found: ID does not exist" containerID="7ec5dd5f769f198dcd8a2e983c05fe8001b1b142f538a98d30381e5abe4274fb" Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.775694 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ec5dd5f769f198dcd8a2e983c05fe8001b1b142f538a98d30381e5abe4274fb"} err="failed to get container status \"7ec5dd5f769f198dcd8a2e983c05fe8001b1b142f538a98d30381e5abe4274fb\": rpc error: code = NotFound desc = could not find container \"7ec5dd5f769f198dcd8a2e983c05fe8001b1b142f538a98d30381e5abe4274fb\": container with ID starting with 7ec5dd5f769f198dcd8a2e983c05fe8001b1b142f538a98d30381e5abe4274fb not found: ID does not exist" Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.775716 5039 scope.go:117] "RemoveContainer" containerID="79dd325bc13f3e640c23a077707e774d015edccd45de0ac57add37160b5eee65" Nov 24 14:04:33 crc kubenswrapper[5039]: E1124 14:04:33.776712 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79dd325bc13f3e640c23a077707e774d015edccd45de0ac57add37160b5eee65\": container with ID starting with 79dd325bc13f3e640c23a077707e774d015edccd45de0ac57add37160b5eee65 not found: ID does not exist" containerID="79dd325bc13f3e640c23a077707e774d015edccd45de0ac57add37160b5eee65" Nov 24 14:04:33 crc kubenswrapper[5039]: I1124 14:04:33.776761 5039 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"79dd325bc13f3e640c23a077707e774d015edccd45de0ac57add37160b5eee65"} err="failed to get container status \"79dd325bc13f3e640c23a077707e774d015edccd45de0ac57add37160b5eee65\": rpc error: code = NotFound desc = could not find container \"79dd325bc13f3e640c23a077707e774d015edccd45de0ac57add37160b5eee65\": container with ID starting with 79dd325bc13f3e640c23a077707e774d015edccd45de0ac57add37160b5eee65 not found: ID does not exist" Nov 24 14:04:34 crc kubenswrapper[5039]: I1124 14:04:34.319213 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09894d54-c8e4-4ba8-acf2-e3e47bd32e31" path="/var/lib/kubelet/pods/09894d54-c8e4-4ba8-acf2-e3e47bd32e31/volumes" Nov 24 14:04:39 crc kubenswrapper[5039]: I1124 14:04:39.758758 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6rgc8" Nov 24 14:04:39 crc kubenswrapper[5039]: I1124 14:04:39.759577 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6rgc8" Nov 24 14:04:39 crc kubenswrapper[5039]: I1124 14:04:39.837648 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6rgc8" Nov 24 14:04:40 crc kubenswrapper[5039]: I1124 14:04:40.839293 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6rgc8" Nov 24 14:04:40 crc kubenswrapper[5039]: I1124 14:04:40.919219 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6rgc8"] Nov 24 14:04:42 crc kubenswrapper[5039]: I1124 14:04:42.789458 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6rgc8" podUID="d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352" containerName="registry-server" containerID="cri-o://8adc3ad4f7b46f957643a663b9e8d3b4c261a9cf0bdf0611db74d6c96b281c6a" gracePeriod=2 Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.319157 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6rgc8" Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.340560 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352-catalog-content\") pod \"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352\" (UID: \"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352\") " Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.340717 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rj6zx\" (UniqueName: \"kubernetes.io/projected/d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352-kube-api-access-rj6zx\") pod \"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352\" (UID: \"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352\") " Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.340804 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352-utilities\") pod \"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352\" (UID: \"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352\") " Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.341759 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352-utilities" (OuterVolumeSpecName: "utilities") pod "d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352" (UID: "d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.342469 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.368752 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352-kube-api-access-rj6zx" (OuterVolumeSpecName: "kube-api-access-rj6zx") pod "d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352" (UID: "d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352"). InnerVolumeSpecName "kube-api-access-rj6zx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.370564 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352" (UID: "d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.444549 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.444770 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rj6zx\" (UniqueName: \"kubernetes.io/projected/d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352-kube-api-access-rj6zx\") on node \"crc\" DevicePath \"\"" Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.806187 5039 generic.go:334] "Generic (PLEG): container finished" podID="d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352" containerID="8adc3ad4f7b46f957643a663b9e8d3b4c261a9cf0bdf0611db74d6c96b281c6a" exitCode=0 Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.806260 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6rgc8" event={"ID":"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352","Type":"ContainerDied","Data":"8adc3ad4f7b46f957643a663b9e8d3b4c261a9cf0bdf0611db74d6c96b281c6a"} Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.806281 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6rgc8" Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.806316 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6rgc8" event={"ID":"d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352","Type":"ContainerDied","Data":"923dd4d6d7413e1c7512ae40325915a739b3a9ee61be7ff683982174b623a056"} Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.806356 5039 scope.go:117] "RemoveContainer" containerID="8adc3ad4f7b46f957643a663b9e8d3b4c261a9cf0bdf0611db74d6c96b281c6a" Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.847593 5039 scope.go:117] "RemoveContainer" containerID="ae77afa0b406e98167c67708362cab3978e34ea5a7a40fafdce57d2ea2576568" Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.851250 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6rgc8"] Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.865119 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6rgc8"] Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.885628 5039 scope.go:117] "RemoveContainer" containerID="0f96fa3161af3d22f4d1d26f20e77b0f4e66c1a4e8211fea8cd8163ecbe0c5d0" Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.952548 5039 scope.go:117] "RemoveContainer" containerID="8adc3ad4f7b46f957643a663b9e8d3b4c261a9cf0bdf0611db74d6c96b281c6a" Nov 24 14:04:43 crc kubenswrapper[5039]: E1124 14:04:43.953107 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8adc3ad4f7b46f957643a663b9e8d3b4c261a9cf0bdf0611db74d6c96b281c6a\": container with ID starting with 8adc3ad4f7b46f957643a663b9e8d3b4c261a9cf0bdf0611db74d6c96b281c6a not found: ID does not exist" containerID="8adc3ad4f7b46f957643a663b9e8d3b4c261a9cf0bdf0611db74d6c96b281c6a" Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.953217 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8adc3ad4f7b46f957643a663b9e8d3b4c261a9cf0bdf0611db74d6c96b281c6a"} err="failed to get container status 
\"8adc3ad4f7b46f957643a663b9e8d3b4c261a9cf0bdf0611db74d6c96b281c6a\": rpc error: code = NotFound desc = could not find container \"8adc3ad4f7b46f957643a663b9e8d3b4c261a9cf0bdf0611db74d6c96b281c6a\": container with ID starting with 8adc3ad4f7b46f957643a663b9e8d3b4c261a9cf0bdf0611db74d6c96b281c6a not found: ID does not exist" Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.953289 5039 scope.go:117] "RemoveContainer" containerID="ae77afa0b406e98167c67708362cab3978e34ea5a7a40fafdce57d2ea2576568" Nov 24 14:04:43 crc kubenswrapper[5039]: E1124 14:04:43.953797 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae77afa0b406e98167c67708362cab3978e34ea5a7a40fafdce57d2ea2576568\": container with ID starting with ae77afa0b406e98167c67708362cab3978e34ea5a7a40fafdce57d2ea2576568 not found: ID does not exist" containerID="ae77afa0b406e98167c67708362cab3978e34ea5a7a40fafdce57d2ea2576568" Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.953826 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae77afa0b406e98167c67708362cab3978e34ea5a7a40fafdce57d2ea2576568"} err="failed to get container status \"ae77afa0b406e98167c67708362cab3978e34ea5a7a40fafdce57d2ea2576568\": rpc error: code = NotFound desc = could not find container \"ae77afa0b406e98167c67708362cab3978e34ea5a7a40fafdce57d2ea2576568\": container with ID starting with ae77afa0b406e98167c67708362cab3978e34ea5a7a40fafdce57d2ea2576568 not found: ID does not exist" Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.953846 5039 scope.go:117] "RemoveContainer" containerID="0f96fa3161af3d22f4d1d26f20e77b0f4e66c1a4e8211fea8cd8163ecbe0c5d0" Nov 24 14:04:43 crc kubenswrapper[5039]: E1124 14:04:43.954161 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f96fa3161af3d22f4d1d26f20e77b0f4e66c1a4e8211fea8cd8163ecbe0c5d0\": container with ID starting with 0f96fa3161af3d22f4d1d26f20e77b0f4e66c1a4e8211fea8cd8163ecbe0c5d0 not found: ID does not exist" containerID="0f96fa3161af3d22f4d1d26f20e77b0f4e66c1a4e8211fea8cd8163ecbe0c5d0" Nov 24 14:04:43 crc kubenswrapper[5039]: I1124 14:04:43.954262 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f96fa3161af3d22f4d1d26f20e77b0f4e66c1a4e8211fea8cd8163ecbe0c5d0"} err="failed to get container status \"0f96fa3161af3d22f4d1d26f20e77b0f4e66c1a4e8211fea8cd8163ecbe0c5d0\": rpc error: code = NotFound desc = could not find container \"0f96fa3161af3d22f4d1d26f20e77b0f4e66c1a4e8211fea8cd8163ecbe0c5d0\": container with ID starting with 0f96fa3161af3d22f4d1d26f20e77b0f4e66c1a4e8211fea8cd8163ecbe0c5d0 not found: ID does not exist" Nov 24 14:04:44 crc kubenswrapper[5039]: I1124 14:04:44.336768 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352" path="/var/lib/kubelet/pods/d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352/volumes" Nov 24 14:05:20 crc kubenswrapper[5039]: I1124 14:05:20.100993 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:05:20 crc kubenswrapper[5039]: I1124 14:05:20.101505 5039 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 14:05:50 crc kubenswrapper[5039]: I1124 14:05:50.101388 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:05:50 crc kubenswrapper[5039]: I1124 14:05:50.102012 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 14:06:20 crc kubenswrapper[5039]: I1124 14:06:20.101839 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:06:20 crc kubenswrapper[5039]: I1124 14:06:20.102344 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 14:06:20 crc kubenswrapper[5039]: I1124 14:06:20.102384 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 14:06:20 crc kubenswrapper[5039]: I1124 14:06:20.103166 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 14:06:20 crc kubenswrapper[5039]: I1124 14:06:20.103231 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36" gracePeriod=600 Nov 24 14:06:20 crc kubenswrapper[5039]: E1124 14:06:20.223495 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:06:20 crc kubenswrapper[5039]: I1124 14:06:20.962480 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36" exitCode=0 Nov 
24 14:06:20 crc kubenswrapper[5039]: I1124 14:06:20.962625 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36"} Nov 24 14:06:20 crc kubenswrapper[5039]: I1124 14:06:20.962895 5039 scope.go:117] "RemoveContainer" containerID="f4df9e2b4ae7b3d6fb4f3a538dbbb6b8373edbff29ee3cbb674417bf32f805d0" Nov 24 14:06:20 crc kubenswrapper[5039]: I1124 14:06:20.964030 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36" Nov 24 14:06:20 crc kubenswrapper[5039]: E1124 14:06:20.964787 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:06:34 crc kubenswrapper[5039]: I1124 14:06:34.306357 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36" Nov 24 14:06:34 crc kubenswrapper[5039]: E1124 14:06:34.307300 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:06:48 crc kubenswrapper[5039]: I1124 14:06:48.323524 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36" Nov 24 14:06:48 crc kubenswrapper[5039]: E1124 14:06:48.324389 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:07:01 crc kubenswrapper[5039]: I1124 14:07:01.307633 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36" Nov 24 14:07:01 crc kubenswrapper[5039]: E1124 14:07:01.308827 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:07:13 crc kubenswrapper[5039]: I1124 14:07:13.306912 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36" Nov 24 14:07:13 crc kubenswrapper[5039]: E1124 14:07:13.307734 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:07:27 crc kubenswrapper[5039]: I1124 14:07:27.306402 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36" Nov 24 14:07:27 crc kubenswrapper[5039]: E1124 14:07:27.307296 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.775923 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.784672 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.795598 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.804366 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.813398 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.825168 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-g2n4f"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.835973 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-fll5v"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.845262 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-xl22h"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.853578 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w85rg"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.862310 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qdzxx"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.871602 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.882666 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.891405 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-qwxfg"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.900401 5039 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-nnvjh"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.908886 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.917666 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-475lg"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.925428 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.936322 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.947742 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.971391 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.980760 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.988410 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb"] Nov 24 14:07:37 crc kubenswrapper[5039]: I1124 14:07:37.996652 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z"] Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.006619 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-vk2n7"] Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.015244 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-kkrxb"] Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.022793 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-7sw8z"] Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.030358 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-vk2n7"] Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.037167 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-zkwdd"] Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.043949 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-vf74j"] Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.051750 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-lh74z"] Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.059454 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-k56rw"] Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.066759 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jlbxw"] Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.314915 5039 scope.go:117] 
"RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36" Nov 24 14:07:38 crc kubenswrapper[5039]: E1124 14:07:38.315164 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.322643 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06972240-fdee-4d23-a066-5919ba1abd8c" path="/var/lib/kubelet/pods/06972240-fdee-4d23-a066-5919ba1abd8c/volumes" Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.323524 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11aaad36-5e7f-4f08-b7fd-9a547c514331" path="/var/lib/kubelet/pods/11aaad36-5e7f-4f08-b7fd-9a547c514331/volumes" Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.324142 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15cc66e4-c047-4ab8-b10d-9e54fd7ef393" path="/var/lib/kubelet/pods/15cc66e4-c047-4ab8-b10d-9e54fd7ef393/volumes" Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.324779 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3fe12f60-6522-4328-883c-2d2d05054d9e" path="/var/lib/kubelet/pods/3fe12f60-6522-4328-883c-2d2d05054d9e/volumes" Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.325892 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2" path="/var/lib/kubelet/pods/4bde6dc0-d7b6-4410-8ee7-5a106c15d1b2/volumes" Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.326528 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e8bb9f6-b0a6-4237-88cc-f99bd22f4784" path="/var/lib/kubelet/pods/4e8bb9f6-b0a6-4237-88cc-f99bd22f4784/volumes" Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.327113 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a0cc535-f6a2-4a08-acfc-fa0c605359bd" path="/var/lib/kubelet/pods/5a0cc535-f6a2-4a08-acfc-fa0c605359bd/volumes" Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.328302 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6139740a-dbfc-41c6-baf0-9651b805c47c" path="/var/lib/kubelet/pods/6139740a-dbfc-41c6-baf0-9651b805c47c/volumes" Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.328906 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6da7bf95-9494-43c0-be01-d2170fe36b61" path="/var/lib/kubelet/pods/6da7bf95-9494-43c0-be01-d2170fe36b61/volumes" Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.329463 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84ee730b-5dc4-4bbb-b817-4f65942865b6" path="/var/lib/kubelet/pods/84ee730b-5dc4-4bbb-b817-4f65942865b6/volumes" Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.330532 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b4db333-74a9-4093-bbc4-51bd0e48d5d1" path="/var/lib/kubelet/pods/8b4db333-74a9-4093-bbc4-51bd0e48d5d1/volumes" Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.331097 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93e04953-49f7-41fb-b2fb-514d4ed838e9" 
path="/var/lib/kubelet/pods/93e04953-49f7-41fb-b2fb-514d4ed838e9/volumes" Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.331683 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d" path="/var/lib/kubelet/pods/a0c6fa43-484a-4f4d-b77e-b36fc7c82f9d/volumes" Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.332293 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a59c2a20-4a1f-4b68-aec2-5e2005f42418" path="/var/lib/kubelet/pods/a59c2a20-4a1f-4b68-aec2-5e2005f42418/volumes" Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.333372 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be684f03-c6b3-4538-a113-1c4a1873dc96" path="/var/lib/kubelet/pods/be684f03-c6b3-4538-a113-1c4a1873dc96/volumes" Nov 24 14:07:38 crc kubenswrapper[5039]: I1124 14:07:38.333992 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c01a55af-b9b1-4958-b31e-b7b47527b7e2" path="/var/lib/kubelet/pods/c01a55af-b9b1-4958-b31e-b7b47527b7e2/volumes" Nov 24 14:07:42 crc kubenswrapper[5039]: I1124 14:07:42.932910 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj"] Nov 24 14:07:42 crc kubenswrapper[5039]: E1124 14:07:42.933968 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09894d54-c8e4-4ba8-acf2-e3e47bd32e31" containerName="extract-content" Nov 24 14:07:42 crc kubenswrapper[5039]: I1124 14:07:42.933984 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="09894d54-c8e4-4ba8-acf2-e3e47bd32e31" containerName="extract-content" Nov 24 14:07:42 crc kubenswrapper[5039]: E1124 14:07:42.934001 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352" containerName="registry-server" Nov 24 14:07:42 crc kubenswrapper[5039]: I1124 14:07:42.934009 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352" containerName="registry-server" Nov 24 14:07:42 crc kubenswrapper[5039]: E1124 14:07:42.934035 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09894d54-c8e4-4ba8-acf2-e3e47bd32e31" containerName="registry-server" Nov 24 14:07:42 crc kubenswrapper[5039]: I1124 14:07:42.934044 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="09894d54-c8e4-4ba8-acf2-e3e47bd32e31" containerName="registry-server" Nov 24 14:07:42 crc kubenswrapper[5039]: E1124 14:07:42.934066 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352" containerName="extract-utilities" Nov 24 14:07:42 crc kubenswrapper[5039]: I1124 14:07:42.934074 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352" containerName="extract-utilities" Nov 24 14:07:42 crc kubenswrapper[5039]: E1124 14:07:42.934087 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352" containerName="extract-content" Nov 24 14:07:42 crc kubenswrapper[5039]: I1124 14:07:42.934095 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352" containerName="extract-content" Nov 24 14:07:42 crc kubenswrapper[5039]: E1124 14:07:42.934118 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09894d54-c8e4-4ba8-acf2-e3e47bd32e31" containerName="extract-utilities" Nov 24 14:07:42 crc kubenswrapper[5039]: I1124 14:07:42.934126 5039 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="09894d54-c8e4-4ba8-acf2-e3e47bd32e31" containerName="extract-utilities" Nov 24 14:07:42 crc kubenswrapper[5039]: I1124 14:07:42.934405 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9d9bc3b-f9a1-4dbd-8756-cdb3bbd5e352" containerName="registry-server" Nov 24 14:07:42 crc kubenswrapper[5039]: I1124 14:07:42.934425 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="09894d54-c8e4-4ba8-acf2-e3e47bd32e31" containerName="registry-server" Nov 24 14:07:42 crc kubenswrapper[5039]: I1124 14:07:42.935474 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" Nov 24 14:07:42 crc kubenswrapper[5039]: I1124 14:07:42.939382 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 14:07:42 crc kubenswrapper[5039]: I1124 14:07:42.939704 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 14:07:42 crc kubenswrapper[5039]: I1124 14:07:42.939869 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 14:07:42 crc kubenswrapper[5039]: I1124 14:07:42.940166 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 24 14:07:42 crc kubenswrapper[5039]: I1124 14:07:42.940303 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 14:07:42 crc kubenswrapper[5039]: I1124 14:07:42.945136 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj"] Nov 24 14:07:43 crc kubenswrapper[5039]: I1124 14:07:43.070715 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nm5s8\" (UniqueName: \"kubernetes.io/projected/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-kube-api-access-nm5s8\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj\" (UID: \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" Nov 24 14:07:43 crc kubenswrapper[5039]: I1124 14:07:43.070788 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj\" (UID: \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" Nov 24 14:07:43 crc kubenswrapper[5039]: I1124 14:07:43.070818 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj\" (UID: \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" Nov 24 14:07:43 crc kubenswrapper[5039]: I1124 14:07:43.070848 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj\" (UID: \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\") " 
pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" Nov 24 14:07:43 crc kubenswrapper[5039]: I1124 14:07:43.070875 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj\" (UID: \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" Nov 24 14:07:43 crc kubenswrapper[5039]: I1124 14:07:43.172716 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nm5s8\" (UniqueName: \"kubernetes.io/projected/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-kube-api-access-nm5s8\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj\" (UID: \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" Nov 24 14:07:43 crc kubenswrapper[5039]: I1124 14:07:43.173125 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj\" (UID: \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" Nov 24 14:07:43 crc kubenswrapper[5039]: I1124 14:07:43.173148 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj\" (UID: \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" Nov 24 14:07:43 crc kubenswrapper[5039]: I1124 14:07:43.173183 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj\" (UID: \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" Nov 24 14:07:43 crc kubenswrapper[5039]: I1124 14:07:43.173210 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj\" (UID: \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" Nov 24 14:07:43 crc kubenswrapper[5039]: I1124 14:07:43.178992 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj\" (UID: \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" Nov 24 14:07:43 crc kubenswrapper[5039]: I1124 14:07:43.179964 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj\" (UID: \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" Nov 24 14:07:43 crc kubenswrapper[5039]: I1124 
14:07:43.180274 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj\" (UID: \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" Nov 24 14:07:43 crc kubenswrapper[5039]: I1124 14:07:43.180902 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj\" (UID: \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" Nov 24 14:07:43 crc kubenswrapper[5039]: I1124 14:07:43.191545 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nm5s8\" (UniqueName: \"kubernetes.io/projected/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-kube-api-access-nm5s8\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj\" (UID: \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" Nov 24 14:07:43 crc kubenswrapper[5039]: I1124 14:07:43.258023 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" Nov 24 14:07:43 crc kubenswrapper[5039]: I1124 14:07:43.786928 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj"] Nov 24 14:07:43 crc kubenswrapper[5039]: W1124 14:07:43.788834 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2bf2f79_d7fa_47ca_a8fc_b48d77875208.slice/crio-c384997ad65c5d6e9d4bc71bee3f3930b3cc361df81d86cf00496b664b9d160e WatchSource:0}: Error finding container c384997ad65c5d6e9d4bc71bee3f3930b3cc361df81d86cf00496b664b9d160e: Status 404 returned error can't find the container with id c384997ad65c5d6e9d4bc71bee3f3930b3cc361df81d86cf00496b664b9d160e Nov 24 14:07:43 crc kubenswrapper[5039]: I1124 14:07:43.898461 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" event={"ID":"c2bf2f79-d7fa-47ca-a8fc-b48d77875208","Type":"ContainerStarted","Data":"c384997ad65c5d6e9d4bc71bee3f3930b3cc361df81d86cf00496b664b9d160e"} Nov 24 14:07:44 crc kubenswrapper[5039]: I1124 14:07:44.913646 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" event={"ID":"c2bf2f79-d7fa-47ca-a8fc-b48d77875208","Type":"ContainerStarted","Data":"5545c2fd1be839b6827ff9bf9dc42683abe22bcafe77127053b6dbaa608154cb"} Nov 24 14:07:44 crc kubenswrapper[5039]: I1124 14:07:44.935454 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" podStartSLOduration=2.298157364 podStartE2EDuration="2.935434426s" podCreationTimestamp="2025-11-24 14:07:42 +0000 UTC" firstStartedPulling="2025-11-24 14:07:43.790976793 +0000 UTC m=+2976.230101293" lastFinishedPulling="2025-11-24 14:07:44.428253845 +0000 UTC m=+2976.867378355" observedRunningTime="2025-11-24 14:07:44.930424713 +0000 UTC m=+2977.369549223" watchObservedRunningTime="2025-11-24 14:07:44.935434426 +0000 UTC m=+2977.374558926" Nov 24 14:07:50 crc 
kubenswrapper[5039]: I1124 14:07:50.558730 5039 scope.go:117] "RemoveContainer" containerID="14aa708a57729306105a393518cccb74152f935950252d1e3a4399ac6d5231c2" Nov 24 14:07:50 crc kubenswrapper[5039]: I1124 14:07:50.635471 5039 scope.go:117] "RemoveContainer" containerID="63592d0398b308a4df971917ac5eebaed0aa2eb42a6d0d162d20f66958c1aa9a" Nov 24 14:07:50 crc kubenswrapper[5039]: I1124 14:07:50.684970 5039 scope.go:117] "RemoveContainer" containerID="424d1a4d90c8dd5fcca1fcad01e4dcdac0f5a3dafc34fa5191ff12761c71a089" Nov 24 14:07:50 crc kubenswrapper[5039]: I1124 14:07:50.740321 5039 scope.go:117] "RemoveContainer" containerID="4211afdf0c538b3ca8ab02a3ce90a6f40a1b50557616fe05f0a5ba51799a8c2c" Nov 24 14:07:50 crc kubenswrapper[5039]: I1124 14:07:50.794292 5039 scope.go:117] "RemoveContainer" containerID="b63a3e6fa61c5c7303c139a7343c1ae2f293f8749843c9c11a84343e7e4ce862" Nov 24 14:07:50 crc kubenswrapper[5039]: I1124 14:07:50.849294 5039 scope.go:117] "RemoveContainer" containerID="df11dfed910cec147071faceefe217c350bc3d6c5205b50b5b4c6107402c0c04" Nov 24 14:07:50 crc kubenswrapper[5039]: I1124 14:07:50.888654 5039 scope.go:117] "RemoveContainer" containerID="0abd66b830377b3f095bef5412d2f6535c67f0a5ef4884b951e0bc53779ebaa3" Nov 24 14:07:50 crc kubenswrapper[5039]: I1124 14:07:50.925320 5039 scope.go:117] "RemoveContainer" containerID="69b7bd167c07f9152a849ca1f79e50d791343172ab45112561b89008bf4618f8" Nov 24 14:07:50 crc kubenswrapper[5039]: I1124 14:07:50.997310 5039 scope.go:117] "RemoveContainer" containerID="eda5c96b2c1990611eae5b33f4c3e26a4b1481705c37f1f2b0d0a548f56cc181" Nov 24 14:07:51 crc kubenswrapper[5039]: I1124 14:07:51.034216 5039 scope.go:117] "RemoveContainer" containerID="db2d40d56cbbf1c2559b007c38085278c481fa083c28d2a37ab39f26753af100" Nov 24 14:07:51 crc kubenswrapper[5039]: I1124 14:07:51.100714 5039 scope.go:117] "RemoveContainer" containerID="3a38d5d38903093c1f9fb37a359cb88145b981cc1729e11b4c7aedfb73c61f5a" Nov 24 14:07:51 crc kubenswrapper[5039]: I1124 14:07:51.149668 5039 scope.go:117] "RemoveContainer" containerID="1731a314676a903005df934daf142b0ba23e9c1bd8e163fa60023b8d117193f2" Nov 24 14:07:51 crc kubenswrapper[5039]: I1124 14:07:51.182387 5039 scope.go:117] "RemoveContainer" containerID="dfbd8717f8130fcbd0047bc6b44c0ceb4ee31d3c805868dd0686a1ad76ed3b11" Nov 24 14:07:51 crc kubenswrapper[5039]: I1124 14:07:51.219239 5039 scope.go:117] "RemoveContainer" containerID="fc5c312e28172a9c643ab8e61d40fa5cdac4a6a87edb94215824402e78dcf1f7" Nov 24 14:07:51 crc kubenswrapper[5039]: I1124 14:07:51.314584 5039 scope.go:117] "RemoveContainer" containerID="d767563b027f2775f56ec9462a59266a1b5d4e7da66a0cf000043e69842282a3" Nov 24 14:07:53 crc kubenswrapper[5039]: I1124 14:07:53.307043 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36" Nov 24 14:07:53 crc kubenswrapper[5039]: E1124 14:07:53.307716 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:07:59 crc kubenswrapper[5039]: I1124 14:07:59.102100 5039 generic.go:334] "Generic (PLEG): container finished" podID="c2bf2f79-d7fa-47ca-a8fc-b48d77875208" 
containerID="5545c2fd1be839b6827ff9bf9dc42683abe22bcafe77127053b6dbaa608154cb" exitCode=0 Nov 24 14:07:59 crc kubenswrapper[5039]: I1124 14:07:59.102169 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" event={"ID":"c2bf2f79-d7fa-47ca-a8fc-b48d77875208","Type":"ContainerDied","Data":"5545c2fd1be839b6827ff9bf9dc42683abe22bcafe77127053b6dbaa608154cb"} Nov 24 14:08:00 crc kubenswrapper[5039]: I1124 14:08:00.596432 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" Nov 24 14:08:00 crc kubenswrapper[5039]: I1124 14:08:00.715152 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-repo-setup-combined-ca-bundle\") pod \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\" (UID: \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\") " Nov 24 14:08:00 crc kubenswrapper[5039]: I1124 14:08:00.715311 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nm5s8\" (UniqueName: \"kubernetes.io/projected/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-kube-api-access-nm5s8\") pod \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\" (UID: \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\") " Nov 24 14:08:00 crc kubenswrapper[5039]: I1124 14:08:00.715367 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-ceph\") pod \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\" (UID: \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\") " Nov 24 14:08:00 crc kubenswrapper[5039]: I1124 14:08:00.715489 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-ssh-key\") pod \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\" (UID: \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\") " Nov 24 14:08:00 crc kubenswrapper[5039]: I1124 14:08:00.715549 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-inventory\") pod \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\" (UID: \"c2bf2f79-d7fa-47ca-a8fc-b48d77875208\") " Nov 24 14:08:00 crc kubenswrapper[5039]: I1124 14:08:00.721439 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "c2bf2f79-d7fa-47ca-a8fc-b48d77875208" (UID: "c2bf2f79-d7fa-47ca-a8fc-b48d77875208"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:08:00 crc kubenswrapper[5039]: I1124 14:08:00.721750 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-kube-api-access-nm5s8" (OuterVolumeSpecName: "kube-api-access-nm5s8") pod "c2bf2f79-d7fa-47ca-a8fc-b48d77875208" (UID: "c2bf2f79-d7fa-47ca-a8fc-b48d77875208"). InnerVolumeSpecName "kube-api-access-nm5s8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:08:00 crc kubenswrapper[5039]: I1124 14:08:00.726599 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-ceph" (OuterVolumeSpecName: "ceph") pod "c2bf2f79-d7fa-47ca-a8fc-b48d77875208" (UID: "c2bf2f79-d7fa-47ca-a8fc-b48d77875208"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:08:00 crc kubenswrapper[5039]: I1124 14:08:00.750844 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c2bf2f79-d7fa-47ca-a8fc-b48d77875208" (UID: "c2bf2f79-d7fa-47ca-a8fc-b48d77875208"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:08:00 crc kubenswrapper[5039]: I1124 14:08:00.754483 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-inventory" (OuterVolumeSpecName: "inventory") pod "c2bf2f79-d7fa-47ca-a8fc-b48d77875208" (UID: "c2bf2f79-d7fa-47ca-a8fc-b48d77875208"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:08:00 crc kubenswrapper[5039]: I1124 14:08:00.817540 5039 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:08:00 crc kubenswrapper[5039]: I1124 14:08:00.817574 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nm5s8\" (UniqueName: \"kubernetes.io/projected/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-kube-api-access-nm5s8\") on node \"crc\" DevicePath \"\"" Nov 24 14:08:00 crc kubenswrapper[5039]: I1124 14:08:00.817588 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-ceph\") on node \"crc\" DevicePath \"\"" Nov 24 14:08:00 crc kubenswrapper[5039]: I1124 14:08:00.817601 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 14:08:00 crc kubenswrapper[5039]: I1124 14:08:00.817611 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2bf2f79-d7fa-47ca-a8fc-b48d77875208-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.129043 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" event={"ID":"c2bf2f79-d7fa-47ca-a8fc-b48d77875208","Type":"ContainerDied","Data":"c384997ad65c5d6e9d4bc71bee3f3930b3cc361df81d86cf00496b664b9d160e"} Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.129107 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c384997ad65c5d6e9d4bc71bee3f3930b3cc361df81d86cf00496b664b9d160e" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.129181 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.215192 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp"] Nov 24 14:08:01 crc kubenswrapper[5039]: E1124 14:08:01.215664 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2bf2f79-d7fa-47ca-a8fc-b48d77875208" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.215684 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2bf2f79-d7fa-47ca-a8fc-b48d77875208" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.215851 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2bf2f79-d7fa-47ca-a8fc-b48d77875208" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.216617 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.219537 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.219537 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.222205 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.231681 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.231938 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.234927 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp\" (UID: \"1b1f6884-b4f4-4657-a039-930296794fbe\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.235016 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp\" (UID: \"1b1f6884-b4f4-4657-a039-930296794fbe\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.235067 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp\" (UID: \"1b1f6884-b4f4-4657-a039-930296794fbe\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.235149 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-bmn4x\" (UniqueName: \"kubernetes.io/projected/1b1f6884-b4f4-4657-a039-930296794fbe-kube-api-access-bmn4x\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp\" (UID: \"1b1f6884-b4f4-4657-a039-930296794fbe\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.235206 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp\" (UID: \"1b1f6884-b4f4-4657-a039-930296794fbe\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.238903 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp"] Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.336967 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp\" (UID: \"1b1f6884-b4f4-4657-a039-930296794fbe\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.337413 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp\" (UID: \"1b1f6884-b4f4-4657-a039-930296794fbe\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.337462 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp\" (UID: \"1b1f6884-b4f4-4657-a039-930296794fbe\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.337549 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmn4x\" (UniqueName: \"kubernetes.io/projected/1b1f6884-b4f4-4657-a039-930296794fbe-kube-api-access-bmn4x\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp\" (UID: \"1b1f6884-b4f4-4657-a039-930296794fbe\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.337599 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp\" (UID: \"1b1f6884-b4f4-4657-a039-930296794fbe\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.341462 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp\" (UID: \"1b1f6884-b4f4-4657-a039-930296794fbe\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" Nov 24 14:08:01 crc 
kubenswrapper[5039]: I1124 14:08:01.343945 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp\" (UID: \"1b1f6884-b4f4-4657-a039-930296794fbe\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.347308 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp\" (UID: \"1b1f6884-b4f4-4657-a039-930296794fbe\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.349805 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp\" (UID: \"1b1f6884-b4f4-4657-a039-930296794fbe\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.365007 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmn4x\" (UniqueName: \"kubernetes.io/projected/1b1f6884-b4f4-4657-a039-930296794fbe-kube-api-access-bmn4x\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp\" (UID: \"1b1f6884-b4f4-4657-a039-930296794fbe\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" Nov 24 14:08:01 crc kubenswrapper[5039]: I1124 14:08:01.557431 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" Nov 24 14:08:02 crc kubenswrapper[5039]: W1124 14:08:02.155998 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b1f6884_b4f4_4657_a039_930296794fbe.slice/crio-5dc7eb6fb6c2cb49898215ecc2f25e4b5f08906a0dc37d854b28762db3591463 WatchSource:0}: Error finding container 5dc7eb6fb6c2cb49898215ecc2f25e4b5f08906a0dc37d854b28762db3591463: Status 404 returned error can't find the container with id 5dc7eb6fb6c2cb49898215ecc2f25e4b5f08906a0dc37d854b28762db3591463 Nov 24 14:08:02 crc kubenswrapper[5039]: I1124 14:08:02.156182 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp"] Nov 24 14:08:03 crc kubenswrapper[5039]: I1124 14:08:03.154905 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" event={"ID":"1b1f6884-b4f4-4657-a039-930296794fbe","Type":"ContainerStarted","Data":"917106f4898aa5242ebc43bf2b6b238a6b5fd51f7ce43c051ba2d38d02603eaa"} Nov 24 14:08:03 crc kubenswrapper[5039]: I1124 14:08:03.155326 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" event={"ID":"1b1f6884-b4f4-4657-a039-930296794fbe","Type":"ContainerStarted","Data":"5dc7eb6fb6c2cb49898215ecc2f25e4b5f08906a0dc37d854b28762db3591463"} Nov 24 14:08:03 crc kubenswrapper[5039]: I1124 14:08:03.188046 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" podStartSLOduration=1.733725851 podStartE2EDuration="2.188013804s" podCreationTimestamp="2025-11-24 14:08:01 +0000 UTC" firstStartedPulling="2025-11-24 14:08:02.158669065 +0000 UTC m=+2994.597793565" lastFinishedPulling="2025-11-24 14:08:02.612957018 +0000 UTC m=+2995.052081518" observedRunningTime="2025-11-24 14:08:03.175238751 +0000 UTC m=+2995.614363301" watchObservedRunningTime="2025-11-24 14:08:03.188013804 +0000 UTC m=+2995.627138344" Nov 24 14:08:08 crc kubenswrapper[5039]: I1124 14:08:08.312855 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36" Nov 24 14:08:08 crc kubenswrapper[5039]: E1124 14:08:08.313694 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:08:20 crc kubenswrapper[5039]: I1124 14:08:20.308050 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36" Nov 24 14:08:20 crc kubenswrapper[5039]: E1124 14:08:20.308661 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:08:35 crc kubenswrapper[5039]: I1124 14:08:35.308092 
Nov 24 14:08:08 crc kubenswrapper[5039]: I1124 14:08:08.312855 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36"
Nov 24 14:08:08 crc kubenswrapper[5039]: E1124 14:08:08.313694 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:08:20 crc kubenswrapper[5039]: I1124 14:08:20.308050 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36"
Nov 24 14:08:20 crc kubenswrapper[5039]: E1124 14:08:20.308661 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:08:35 crc kubenswrapper[5039]: I1124 14:08:35.308092 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36"
Nov 24 14:08:35 crc kubenswrapper[5039]: E1124 14:08:35.309781 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:08:48 crc kubenswrapper[5039]: I1124 14:08:48.314321 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36"
Nov 24 14:08:48 crc kubenswrapper[5039]: E1124 14:08:48.315120 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:08:51 crc kubenswrapper[5039]: I1124 14:08:51.684546 5039 scope.go:117] "RemoveContainer" containerID="e2e911ea32c407d7f6341fff73e4571e663d0b9320fa23b9b4ac7c7475e7695b"
Nov 24 14:09:00 crc kubenswrapper[5039]: I1124 14:09:00.313238 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36"
Nov 24 14:09:00 crc kubenswrapper[5039]: E1124 14:09:00.314206 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:09:12 crc kubenswrapper[5039]: I1124 14:09:12.306717 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36"
Nov 24 14:09:12 crc kubenswrapper[5039]: E1124 14:09:12.309587 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:09:23 crc kubenswrapper[5039]: I1124 14:09:23.308252 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36"
Nov 24 14:09:23 crc kubenswrapper[5039]: E1124 14:09:23.309287 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
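The RemoveContainer / CrashLoopBackOff pairs above repeat every sync because the restart backoff for machine-config-daemon has already saturated at the 5m0s cap quoted in the error. A minimal sketch of that kind of capped exponential backoff; the 10s base is an assumption taken from upstream kubelet defaults, since this log only shows the 5m cap:

    import itertools

    def restart_delays(base=10.0, cap=300.0):
        """Yield capped, doubling restart delays in seconds."""
        delay = base
        while True:
            yield min(delay, cap)
            delay *= 2

    print(list(itertools.islice(restart_delays(), 8)))
    # [10.0, 20.0, 40.0, 80.0, 160.0, 300.0, 300.0, 300.0]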
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-czl7m"] Nov 24 14:09:31 crc kubenswrapper[5039]: I1124 14:09:31.232955 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-czl7m" Nov 24 14:09:31 crc kubenswrapper[5039]: I1124 14:09:31.245564 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-czl7m"] Nov 24 14:09:31 crc kubenswrapper[5039]: I1124 14:09:31.419182 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgx6c\" (UniqueName: \"kubernetes.io/projected/7fc0dd69-f567-4bd7-a642-88c9b1725f59-kube-api-access-fgx6c\") pod \"redhat-operators-czl7m\" (UID: \"7fc0dd69-f567-4bd7-a642-88c9b1725f59\") " pod="openshift-marketplace/redhat-operators-czl7m" Nov 24 14:09:31 crc kubenswrapper[5039]: I1124 14:09:31.420124 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fc0dd69-f567-4bd7-a642-88c9b1725f59-catalog-content\") pod \"redhat-operators-czl7m\" (UID: \"7fc0dd69-f567-4bd7-a642-88c9b1725f59\") " pod="openshift-marketplace/redhat-operators-czl7m" Nov 24 14:09:31 crc kubenswrapper[5039]: I1124 14:09:31.420217 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fc0dd69-f567-4bd7-a642-88c9b1725f59-utilities\") pod \"redhat-operators-czl7m\" (UID: \"7fc0dd69-f567-4bd7-a642-88c9b1725f59\") " pod="openshift-marketplace/redhat-operators-czl7m" Nov 24 14:09:31 crc kubenswrapper[5039]: I1124 14:09:31.522095 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fc0dd69-f567-4bd7-a642-88c9b1725f59-catalog-content\") pod \"redhat-operators-czl7m\" (UID: \"7fc0dd69-f567-4bd7-a642-88c9b1725f59\") " pod="openshift-marketplace/redhat-operators-czl7m" Nov 24 14:09:31 crc kubenswrapper[5039]: I1124 14:09:31.522152 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fc0dd69-f567-4bd7-a642-88c9b1725f59-utilities\") pod \"redhat-operators-czl7m\" (UID: \"7fc0dd69-f567-4bd7-a642-88c9b1725f59\") " pod="openshift-marketplace/redhat-operators-czl7m" Nov 24 14:09:31 crc kubenswrapper[5039]: I1124 14:09:31.522280 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgx6c\" (UniqueName: \"kubernetes.io/projected/7fc0dd69-f567-4bd7-a642-88c9b1725f59-kube-api-access-fgx6c\") pod \"redhat-operators-czl7m\" (UID: \"7fc0dd69-f567-4bd7-a642-88c9b1725f59\") " pod="openshift-marketplace/redhat-operators-czl7m" Nov 24 14:09:31 crc kubenswrapper[5039]: I1124 14:09:31.522669 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fc0dd69-f567-4bd7-a642-88c9b1725f59-catalog-content\") pod \"redhat-operators-czl7m\" (UID: \"7fc0dd69-f567-4bd7-a642-88c9b1725f59\") " pod="openshift-marketplace/redhat-operators-czl7m" Nov 24 14:09:31 crc kubenswrapper[5039]: I1124 14:09:31.522771 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fc0dd69-f567-4bd7-a642-88c9b1725f59-utilities\") pod \"redhat-operators-czl7m\" (UID: 
\"7fc0dd69-f567-4bd7-a642-88c9b1725f59\") " pod="openshift-marketplace/redhat-operators-czl7m" Nov 24 14:09:31 crc kubenswrapper[5039]: I1124 14:09:31.545411 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgx6c\" (UniqueName: \"kubernetes.io/projected/7fc0dd69-f567-4bd7-a642-88c9b1725f59-kube-api-access-fgx6c\") pod \"redhat-operators-czl7m\" (UID: \"7fc0dd69-f567-4bd7-a642-88c9b1725f59\") " pod="openshift-marketplace/redhat-operators-czl7m" Nov 24 14:09:31 crc kubenswrapper[5039]: I1124 14:09:31.577195 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-czl7m" Nov 24 14:09:32 crc kubenswrapper[5039]: I1124 14:09:32.043847 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-czl7m"] Nov 24 14:09:32 crc kubenswrapper[5039]: I1124 14:09:32.362768 5039 generic.go:334] "Generic (PLEG): container finished" podID="7fc0dd69-f567-4bd7-a642-88c9b1725f59" containerID="a53664465cfd685074a1202460e36f1bb096359010e7f692d7833b1454f43cbd" exitCode=0 Nov 24 14:09:32 crc kubenswrapper[5039]: I1124 14:09:32.362812 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-czl7m" event={"ID":"7fc0dd69-f567-4bd7-a642-88c9b1725f59","Type":"ContainerDied","Data":"a53664465cfd685074a1202460e36f1bb096359010e7f692d7833b1454f43cbd"} Nov 24 14:09:32 crc kubenswrapper[5039]: I1124 14:09:32.362838 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-czl7m" event={"ID":"7fc0dd69-f567-4bd7-a642-88c9b1725f59","Type":"ContainerStarted","Data":"811499f9a02d80104b85bcada55b0db8a947c97d7548503141db752b11c5d660"} Nov 24 14:09:32 crc kubenswrapper[5039]: I1124 14:09:32.365531 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 14:09:34 crc kubenswrapper[5039]: I1124 14:09:34.395392 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-czl7m" event={"ID":"7fc0dd69-f567-4bd7-a642-88c9b1725f59","Type":"ContainerStarted","Data":"39c5593f91e0f590e796ac547a96ec756e885ac7bf97afbb09678f0e66bd0ed8"} Nov 24 14:09:35 crc kubenswrapper[5039]: I1124 14:09:35.307729 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36" Nov 24 14:09:35 crc kubenswrapper[5039]: E1124 14:09:35.308420 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:09:35 crc kubenswrapper[5039]: I1124 14:09:35.409399 5039 generic.go:334] "Generic (PLEG): container finished" podID="7fc0dd69-f567-4bd7-a642-88c9b1725f59" containerID="39c5593f91e0f590e796ac547a96ec756e885ac7bf97afbb09678f0e66bd0ed8" exitCode=0 Nov 24 14:09:35 crc kubenswrapper[5039]: I1124 14:09:35.409452 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-czl7m" event={"ID":"7fc0dd69-f567-4bd7-a642-88c9b1725f59","Type":"ContainerDied","Data":"39c5593f91e0f590e796ac547a96ec756e885ac7bf97afbb09678f0e66bd0ed8"} Nov 24 14:09:36 crc kubenswrapper[5039]: I1124 14:09:36.430401 5039 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-czl7m" event={"ID":"7fc0dd69-f567-4bd7-a642-88c9b1725f59","Type":"ContainerStarted","Data":"aef5056abc48e1632368f3c89ceef42b9e401c67f529db27890a5d2ece0ce7c0"}
Nov 24 14:09:36 crc kubenswrapper[5039]: I1124 14:09:36.456852 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-czl7m" podStartSLOduration=1.666450294 podStartE2EDuration="5.456836649s" podCreationTimestamp="2025-11-24 14:09:31 +0000 UTC" firstStartedPulling="2025-11-24 14:09:32.365303487 +0000 UTC m=+3084.804427987" lastFinishedPulling="2025-11-24 14:09:36.155689852 +0000 UTC m=+3088.594814342" observedRunningTime="2025-11-24 14:09:36.448547235 +0000 UTC m=+3088.887671735" watchObservedRunningTime="2025-11-24 14:09:36.456836649 +0000 UTC m=+3088.895961149"
Nov 24 14:09:41 crc kubenswrapper[5039]: I1124 14:09:41.578083 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-czl7m"
Nov 24 14:09:41 crc kubenswrapper[5039]: I1124 14:09:41.579374 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-czl7m"
Nov 24 14:09:41 crc kubenswrapper[5039]: I1124 14:09:41.650293 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-czl7m"
Nov 24 14:09:42 crc kubenswrapper[5039]: I1124 14:09:42.606077 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-czl7m"
Nov 24 14:09:42 crc kubenswrapper[5039]: I1124 14:09:42.674609 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-czl7m"]
Nov 24 14:09:44 crc kubenswrapper[5039]: I1124 14:09:44.529335 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-czl7m" podUID="7fc0dd69-f567-4bd7-a642-88c9b1725f59" containerName="registry-server" containerID="cri-o://aef5056abc48e1632368f3c89ceef42b9e401c67f529db27890a5d2ece0ce7c0" gracePeriod=2
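The "SyncLoop (probe)" entries above show the registry-server container going unhealthy -> started on its startup probe and "" -> ready on its readiness probe, just before the API DELETE triggers a graceful kill. A small sketch (not part of the log) for pulling those transitions out of a kubelet log stream:

    import re

    # Matches the "SyncLoop (probe)" entries shown above.
    PROBE = re.compile(r'"SyncLoop \(probe\)" probe="(?P<probe>\w+)"'
                       r' status="(?P<status>\w*)" pod="(?P<pod>[^"]+)"')

    def probe_transitions(lines):
        """Yield (pod, probe, status) whenever a probe's reported status changes."""
        last = {}
        for line in lines:
            m = PROBE.search(line)
            if not m:
                continue
            key = (m["pod"], m["probe"])
            status = m["status"] or "<unset>"
            if last.get(key) != status:
                last[key] = status
                yield key[0], key[1], status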
Need to start a new one" pod="openshift-marketplace/redhat-operators-czl7m" Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.114470 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fgx6c\" (UniqueName: \"kubernetes.io/projected/7fc0dd69-f567-4bd7-a642-88c9b1725f59-kube-api-access-fgx6c\") pod \"7fc0dd69-f567-4bd7-a642-88c9b1725f59\" (UID: \"7fc0dd69-f567-4bd7-a642-88c9b1725f59\") " Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.114550 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fc0dd69-f567-4bd7-a642-88c9b1725f59-utilities\") pod \"7fc0dd69-f567-4bd7-a642-88c9b1725f59\" (UID: \"7fc0dd69-f567-4bd7-a642-88c9b1725f59\") " Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.114609 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fc0dd69-f567-4bd7-a642-88c9b1725f59-catalog-content\") pod \"7fc0dd69-f567-4bd7-a642-88c9b1725f59\" (UID: \"7fc0dd69-f567-4bd7-a642-88c9b1725f59\") " Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.117209 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fc0dd69-f567-4bd7-a642-88c9b1725f59-utilities" (OuterVolumeSpecName: "utilities") pod "7fc0dd69-f567-4bd7-a642-88c9b1725f59" (UID: "7fc0dd69-f567-4bd7-a642-88c9b1725f59"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.122032 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fc0dd69-f567-4bd7-a642-88c9b1725f59-kube-api-access-fgx6c" (OuterVolumeSpecName: "kube-api-access-fgx6c") pod "7fc0dd69-f567-4bd7-a642-88c9b1725f59" (UID: "7fc0dd69-f567-4bd7-a642-88c9b1725f59"). InnerVolumeSpecName "kube-api-access-fgx6c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.201035 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fc0dd69-f567-4bd7-a642-88c9b1725f59-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7fc0dd69-f567-4bd7-a642-88c9b1725f59" (UID: "7fc0dd69-f567-4bd7-a642-88c9b1725f59"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.216624 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fc0dd69-f567-4bd7-a642-88c9b1725f59-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.216658 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fgx6c\" (UniqueName: \"kubernetes.io/projected/7fc0dd69-f567-4bd7-a642-88c9b1725f59-kube-api-access-fgx6c\") on node \"crc\" DevicePath \"\"" Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.216671 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fc0dd69-f567-4bd7-a642-88c9b1725f59-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.539614 5039 generic.go:334] "Generic (PLEG): container finished" podID="7fc0dd69-f567-4bd7-a642-88c9b1725f59" containerID="aef5056abc48e1632368f3c89ceef42b9e401c67f529db27890a5d2ece0ce7c0" exitCode=0 Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.539704 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-czl7m" event={"ID":"7fc0dd69-f567-4bd7-a642-88c9b1725f59","Type":"ContainerDied","Data":"aef5056abc48e1632368f3c89ceef42b9e401c67f529db27890a5d2ece0ce7c0"} Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.541059 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-czl7m" event={"ID":"7fc0dd69-f567-4bd7-a642-88c9b1725f59","Type":"ContainerDied","Data":"811499f9a02d80104b85bcada55b0db8a947c97d7548503141db752b11c5d660"} Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.539768 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-czl7m" Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.541124 5039 scope.go:117] "RemoveContainer" containerID="aef5056abc48e1632368f3c89ceef42b9e401c67f529db27890a5d2ece0ce7c0" Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.561072 5039 scope.go:117] "RemoveContainer" containerID="39c5593f91e0f590e796ac547a96ec756e885ac7bf97afbb09678f0e66bd0ed8" Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.580406 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-czl7m"] Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.595351 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-czl7m"] Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.599600 5039 scope.go:117] "RemoveContainer" containerID="a53664465cfd685074a1202460e36f1bb096359010e7f692d7833b1454f43cbd" Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.666625 5039 scope.go:117] "RemoveContainer" containerID="aef5056abc48e1632368f3c89ceef42b9e401c67f529db27890a5d2ece0ce7c0" Nov 24 14:09:45 crc kubenswrapper[5039]: E1124 14:09:45.668677 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aef5056abc48e1632368f3c89ceef42b9e401c67f529db27890a5d2ece0ce7c0\": container with ID starting with aef5056abc48e1632368f3c89ceef42b9e401c67f529db27890a5d2ece0ce7c0 not found: ID does not exist" containerID="aef5056abc48e1632368f3c89ceef42b9e401c67f529db27890a5d2ece0ce7c0" Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.668713 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aef5056abc48e1632368f3c89ceef42b9e401c67f529db27890a5d2ece0ce7c0"} err="failed to get container status \"aef5056abc48e1632368f3c89ceef42b9e401c67f529db27890a5d2ece0ce7c0\": rpc error: code = NotFound desc = could not find container \"aef5056abc48e1632368f3c89ceef42b9e401c67f529db27890a5d2ece0ce7c0\": container with ID starting with aef5056abc48e1632368f3c89ceef42b9e401c67f529db27890a5d2ece0ce7c0 not found: ID does not exist" Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.668735 5039 scope.go:117] "RemoveContainer" containerID="39c5593f91e0f590e796ac547a96ec756e885ac7bf97afbb09678f0e66bd0ed8" Nov 24 14:09:45 crc kubenswrapper[5039]: E1124 14:09:45.669167 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39c5593f91e0f590e796ac547a96ec756e885ac7bf97afbb09678f0e66bd0ed8\": container with ID starting with 39c5593f91e0f590e796ac547a96ec756e885ac7bf97afbb09678f0e66bd0ed8 not found: ID does not exist" containerID="39c5593f91e0f590e796ac547a96ec756e885ac7bf97afbb09678f0e66bd0ed8" Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.669189 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39c5593f91e0f590e796ac547a96ec756e885ac7bf97afbb09678f0e66bd0ed8"} err="failed to get container status \"39c5593f91e0f590e796ac547a96ec756e885ac7bf97afbb09678f0e66bd0ed8\": rpc error: code = NotFound desc = could not find container \"39c5593f91e0f590e796ac547a96ec756e885ac7bf97afbb09678f0e66bd0ed8\": container with ID starting with 39c5593f91e0f590e796ac547a96ec756e885ac7bf97afbb09678f0e66bd0ed8 not found: ID does not exist" Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.669201 5039 scope.go:117] "RemoveContainer" 
containerID="a53664465cfd685074a1202460e36f1bb096359010e7f692d7833b1454f43cbd" Nov 24 14:09:45 crc kubenswrapper[5039]: E1124 14:09:45.669421 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a53664465cfd685074a1202460e36f1bb096359010e7f692d7833b1454f43cbd\": container with ID starting with a53664465cfd685074a1202460e36f1bb096359010e7f692d7833b1454f43cbd not found: ID does not exist" containerID="a53664465cfd685074a1202460e36f1bb096359010e7f692d7833b1454f43cbd" Nov 24 14:09:45 crc kubenswrapper[5039]: I1124 14:09:45.669436 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a53664465cfd685074a1202460e36f1bb096359010e7f692d7833b1454f43cbd"} err="failed to get container status \"a53664465cfd685074a1202460e36f1bb096359010e7f692d7833b1454f43cbd\": rpc error: code = NotFound desc = could not find container \"a53664465cfd685074a1202460e36f1bb096359010e7f692d7833b1454f43cbd\": container with ID starting with a53664465cfd685074a1202460e36f1bb096359010e7f692d7833b1454f43cbd not found: ID does not exist" Nov 24 14:09:46 crc kubenswrapper[5039]: I1124 14:09:46.319942 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7fc0dd69-f567-4bd7-a642-88c9b1725f59" path="/var/lib/kubelet/pods/7fc0dd69-f567-4bd7-a642-88c9b1725f59/volumes" Nov 24 14:09:47 crc kubenswrapper[5039]: I1124 14:09:47.306740 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36" Nov 24 14:09:47 crc kubenswrapper[5039]: E1124 14:09:47.307115 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:09:54 crc kubenswrapper[5039]: I1124 14:09:54.638112 5039 generic.go:334] "Generic (PLEG): container finished" podID="1b1f6884-b4f4-4657-a039-930296794fbe" containerID="917106f4898aa5242ebc43bf2b6b238a6b5fd51f7ce43c051ba2d38d02603eaa" exitCode=0 Nov 24 14:09:54 crc kubenswrapper[5039]: I1124 14:09:54.638604 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" event={"ID":"1b1f6884-b4f4-4657-a039-930296794fbe","Type":"ContainerDied","Data":"917106f4898aa5242ebc43bf2b6b238a6b5fd51f7ce43c051ba2d38d02603eaa"} Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.106554 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.292357 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-ssh-key\") pod \"1b1f6884-b4f4-4657-a039-930296794fbe\" (UID: \"1b1f6884-b4f4-4657-a039-930296794fbe\") " Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.292495 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-ceph\") pod \"1b1f6884-b4f4-4657-a039-930296794fbe\" (UID: \"1b1f6884-b4f4-4657-a039-930296794fbe\") " Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.292616 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-bootstrap-combined-ca-bundle\") pod \"1b1f6884-b4f4-4657-a039-930296794fbe\" (UID: \"1b1f6884-b4f4-4657-a039-930296794fbe\") " Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.292716 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-inventory\") pod \"1b1f6884-b4f4-4657-a039-930296794fbe\" (UID: \"1b1f6884-b4f4-4657-a039-930296794fbe\") " Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.292842 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bmn4x\" (UniqueName: \"kubernetes.io/projected/1b1f6884-b4f4-4657-a039-930296794fbe-kube-api-access-bmn4x\") pod \"1b1f6884-b4f4-4657-a039-930296794fbe\" (UID: \"1b1f6884-b4f4-4657-a039-930296794fbe\") " Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.299795 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-ceph" (OuterVolumeSpecName: "ceph") pod "1b1f6884-b4f4-4657-a039-930296794fbe" (UID: "1b1f6884-b4f4-4657-a039-930296794fbe"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.301908 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b1f6884-b4f4-4657-a039-930296794fbe-kube-api-access-bmn4x" (OuterVolumeSpecName: "kube-api-access-bmn4x") pod "1b1f6884-b4f4-4657-a039-930296794fbe" (UID: "1b1f6884-b4f4-4657-a039-930296794fbe"). InnerVolumeSpecName "kube-api-access-bmn4x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.302690 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "1b1f6884-b4f4-4657-a039-930296794fbe" (UID: "1b1f6884-b4f4-4657-a039-930296794fbe"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.328135 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1b1f6884-b4f4-4657-a039-930296794fbe" (UID: "1b1f6884-b4f4-4657-a039-930296794fbe"). 
InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.328678 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-inventory" (OuterVolumeSpecName: "inventory") pod "1b1f6884-b4f4-4657-a039-930296794fbe" (UID: "1b1f6884-b4f4-4657-a039-930296794fbe"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.395528 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-ceph\") on node \"crc\" DevicePath \"\"" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.395573 5039 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.395593 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.395606 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bmn4x\" (UniqueName: \"kubernetes.io/projected/1b1f6884-b4f4-4657-a039-930296794fbe-kube-api-access-bmn4x\") on node \"crc\" DevicePath \"\"" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.395621 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1b1f6884-b4f4-4657-a039-930296794fbe-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.658268 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp" event={"ID":"1b1f6884-b4f4-4657-a039-930296794fbe","Type":"ContainerDied","Data":"5dc7eb6fb6c2cb49898215ecc2f25e4b5f08906a0dc37d854b28762db3591463"} Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.658309 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5dc7eb6fb6c2cb49898215ecc2f25e4b5f08906a0dc37d854b28762db3591463" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.658361 5039 util.go:48] "No ready sandbox for pod can be found. 
Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.658361 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp"
Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.769564 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl"]
Nov 24 14:09:56 crc kubenswrapper[5039]: E1124 14:09:56.770066 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fc0dd69-f567-4bd7-a642-88c9b1725f59" containerName="extract-content"
Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.770080 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fc0dd69-f567-4bd7-a642-88c9b1725f59" containerName="extract-content"
Nov 24 14:09:56 crc kubenswrapper[5039]: E1124 14:09:56.770099 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fc0dd69-f567-4bd7-a642-88c9b1725f59" containerName="registry-server"
Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.770106 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fc0dd69-f567-4bd7-a642-88c9b1725f59" containerName="registry-server"
Nov 24 14:09:56 crc kubenswrapper[5039]: E1124 14:09:56.770134 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b1f6884-b4f4-4657-a039-930296794fbe" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.770144 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b1f6884-b4f4-4657-a039-930296794fbe" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 24 14:09:56 crc kubenswrapper[5039]: E1124 14:09:56.770172 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fc0dd69-f567-4bd7-a642-88c9b1725f59" containerName="extract-utilities"
Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.770180 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fc0dd69-f567-4bd7-a642-88c9b1725f59" containerName="extract-utilities"
Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.770460 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b1f6884-b4f4-4657-a039-930296794fbe" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.770477 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fc0dd69-f567-4bd7-a642-88c9b1725f59" containerName="registry-server"
Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.771425 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.774658 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.774762 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.774883 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.775204 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.775414 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.812991 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl"] Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.904748 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3ffbbfde-6e25-49e1-ab24-061d1e90c133-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl\" (UID: \"3ffbbfde-6e25-49e1-ab24-061d1e90c133\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.904815 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3ffbbfde-6e25-49e1-ab24-061d1e90c133-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl\" (UID: \"3ffbbfde-6e25-49e1-ab24-061d1e90c133\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.905394 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3ffbbfde-6e25-49e1-ab24-061d1e90c133-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl\" (UID: \"3ffbbfde-6e25-49e1-ab24-061d1e90c133\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl" Nov 24 14:09:56 crc kubenswrapper[5039]: I1124 14:09:56.905691 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbkm2\" (UniqueName: \"kubernetes.io/projected/3ffbbfde-6e25-49e1-ab24-061d1e90c133-kube-api-access-xbkm2\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl\" (UID: \"3ffbbfde-6e25-49e1-ab24-061d1e90c133\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl" Nov 24 14:09:57 crc kubenswrapper[5039]: I1124 14:09:57.007477 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3ffbbfde-6e25-49e1-ab24-061d1e90c133-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl\" (UID: \"3ffbbfde-6e25-49e1-ab24-061d1e90c133\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl" Nov 24 14:09:57 crc kubenswrapper[5039]: I1124 14:09:57.007607 5039 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-xbkm2\" (UniqueName: \"kubernetes.io/projected/3ffbbfde-6e25-49e1-ab24-061d1e90c133-kube-api-access-xbkm2\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl\" (UID: \"3ffbbfde-6e25-49e1-ab24-061d1e90c133\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl" Nov 24 14:09:57 crc kubenswrapper[5039]: I1124 14:09:57.007643 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3ffbbfde-6e25-49e1-ab24-061d1e90c133-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl\" (UID: \"3ffbbfde-6e25-49e1-ab24-061d1e90c133\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl" Nov 24 14:09:57 crc kubenswrapper[5039]: I1124 14:09:57.007665 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3ffbbfde-6e25-49e1-ab24-061d1e90c133-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl\" (UID: \"3ffbbfde-6e25-49e1-ab24-061d1e90c133\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl" Nov 24 14:09:57 crc kubenswrapper[5039]: I1124 14:09:57.011380 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3ffbbfde-6e25-49e1-ab24-061d1e90c133-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl\" (UID: \"3ffbbfde-6e25-49e1-ab24-061d1e90c133\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl" Nov 24 14:09:57 crc kubenswrapper[5039]: I1124 14:09:57.011479 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3ffbbfde-6e25-49e1-ab24-061d1e90c133-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl\" (UID: \"3ffbbfde-6e25-49e1-ab24-061d1e90c133\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl" Nov 24 14:09:57 crc kubenswrapper[5039]: I1124 14:09:57.011799 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3ffbbfde-6e25-49e1-ab24-061d1e90c133-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl\" (UID: \"3ffbbfde-6e25-49e1-ab24-061d1e90c133\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl" Nov 24 14:09:57 crc kubenswrapper[5039]: I1124 14:09:57.027553 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbkm2\" (UniqueName: \"kubernetes.io/projected/3ffbbfde-6e25-49e1-ab24-061d1e90c133-kube-api-access-xbkm2\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl\" (UID: \"3ffbbfde-6e25-49e1-ab24-061d1e90c133\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl" Nov 24 14:09:57 crc kubenswrapper[5039]: I1124 14:09:57.118436 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl" Nov 24 14:09:57 crc kubenswrapper[5039]: W1124 14:09:57.712515 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3ffbbfde_6e25_49e1_ab24_061d1e90c133.slice/crio-6f5abf32bc00c2a30f80dede7ee9fda4ef156d500eedb1c7e8e50bebeb2b565f WatchSource:0}: Error finding container 6f5abf32bc00c2a30f80dede7ee9fda4ef156d500eedb1c7e8e50bebeb2b565f: Status 404 returned error can't find the container with id 6f5abf32bc00c2a30f80dede7ee9fda4ef156d500eedb1c7e8e50bebeb2b565f Nov 24 14:09:57 crc kubenswrapper[5039]: I1124 14:09:57.714958 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl"] Nov 24 14:09:58 crc kubenswrapper[5039]: I1124 14:09:58.682691 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl" event={"ID":"3ffbbfde-6e25-49e1-ab24-061d1e90c133","Type":"ContainerStarted","Data":"7e5074df3b7f28d9322492bba6a9c013edf9a837b18c4d25fcc2f65065879296"} Nov 24 14:09:58 crc kubenswrapper[5039]: I1124 14:09:58.683289 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl" event={"ID":"3ffbbfde-6e25-49e1-ab24-061d1e90c133","Type":"ContainerStarted","Data":"6f5abf32bc00c2a30f80dede7ee9fda4ef156d500eedb1c7e8e50bebeb2b565f"} Nov 24 14:09:58 crc kubenswrapper[5039]: I1124 14:09:58.702293 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl" podStartSLOduration=2.191452811 podStartE2EDuration="2.702274525s" podCreationTimestamp="2025-11-24 14:09:56 +0000 UTC" firstStartedPulling="2025-11-24 14:09:57.715149903 +0000 UTC m=+3110.154274403" lastFinishedPulling="2025-11-24 14:09:58.225971607 +0000 UTC m=+3110.665096117" observedRunningTime="2025-11-24 14:09:58.696300139 +0000 UTC m=+3111.135424649" watchObservedRunningTime="2025-11-24 14:09:58.702274525 +0000 UTC m=+3111.141399025" Nov 24 14:09:59 crc kubenswrapper[5039]: I1124 14:09:59.307929 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36" Nov 24 14:09:59 crc kubenswrapper[5039]: E1124 14:09:59.308351 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:10:11 crc kubenswrapper[5039]: I1124 14:10:11.307245 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36" Nov 24 14:10:11 crc kubenswrapper[5039]: E1124 14:10:11.308106 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:10:26 crc 
kubenswrapper[5039]: I1124 14:10:26.307761 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36" Nov 24 14:10:26 crc kubenswrapper[5039]: E1124 14:10:26.309036 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:10:31 crc kubenswrapper[5039]: I1124 14:10:31.036882 5039 generic.go:334] "Generic (PLEG): container finished" podID="3ffbbfde-6e25-49e1-ab24-061d1e90c133" containerID="7e5074df3b7f28d9322492bba6a9c013edf9a837b18c4d25fcc2f65065879296" exitCode=0 Nov 24 14:10:31 crc kubenswrapper[5039]: I1124 14:10:31.036991 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl" event={"ID":"3ffbbfde-6e25-49e1-ab24-061d1e90c133","Type":"ContainerDied","Data":"7e5074df3b7f28d9322492bba6a9c013edf9a837b18c4d25fcc2f65065879296"} Nov 24 14:10:32 crc kubenswrapper[5039]: I1124 14:10:32.543624 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl" Nov 24 14:10:32 crc kubenswrapper[5039]: I1124 14:10:32.643190 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xbkm2\" (UniqueName: \"kubernetes.io/projected/3ffbbfde-6e25-49e1-ab24-061d1e90c133-kube-api-access-xbkm2\") pod \"3ffbbfde-6e25-49e1-ab24-061d1e90c133\" (UID: \"3ffbbfde-6e25-49e1-ab24-061d1e90c133\") " Nov 24 14:10:32 crc kubenswrapper[5039]: I1124 14:10:32.643265 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3ffbbfde-6e25-49e1-ab24-061d1e90c133-ssh-key\") pod \"3ffbbfde-6e25-49e1-ab24-061d1e90c133\" (UID: \"3ffbbfde-6e25-49e1-ab24-061d1e90c133\") " Nov 24 14:10:32 crc kubenswrapper[5039]: I1124 14:10:32.643299 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3ffbbfde-6e25-49e1-ab24-061d1e90c133-ceph\") pod \"3ffbbfde-6e25-49e1-ab24-061d1e90c133\" (UID: \"3ffbbfde-6e25-49e1-ab24-061d1e90c133\") " Nov 24 14:10:32 crc kubenswrapper[5039]: I1124 14:10:32.643346 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3ffbbfde-6e25-49e1-ab24-061d1e90c133-inventory\") pod \"3ffbbfde-6e25-49e1-ab24-061d1e90c133\" (UID: \"3ffbbfde-6e25-49e1-ab24-061d1e90c133\") " Nov 24 14:10:32 crc kubenswrapper[5039]: I1124 14:10:32.649027 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ffbbfde-6e25-49e1-ab24-061d1e90c133-ceph" (OuterVolumeSpecName: "ceph") pod "3ffbbfde-6e25-49e1-ab24-061d1e90c133" (UID: "3ffbbfde-6e25-49e1-ab24-061d1e90c133"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:10:32 crc kubenswrapper[5039]: I1124 14:10:32.653049 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ffbbfde-6e25-49e1-ab24-061d1e90c133-kube-api-access-xbkm2" (OuterVolumeSpecName: "kube-api-access-xbkm2") pod "3ffbbfde-6e25-49e1-ab24-061d1e90c133" (UID: "3ffbbfde-6e25-49e1-ab24-061d1e90c133"). InnerVolumeSpecName "kube-api-access-xbkm2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 14:10:32 crc kubenswrapper[5039]: I1124 14:10:32.678773 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ffbbfde-6e25-49e1-ab24-061d1e90c133-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3ffbbfde-6e25-49e1-ab24-061d1e90c133" (UID: "3ffbbfde-6e25-49e1-ab24-061d1e90c133"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:10:32 crc kubenswrapper[5039]: I1124 14:10:32.693495 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ffbbfde-6e25-49e1-ab24-061d1e90c133-inventory" (OuterVolumeSpecName: "inventory") pod "3ffbbfde-6e25-49e1-ab24-061d1e90c133" (UID: "3ffbbfde-6e25-49e1-ab24-061d1e90c133"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:10:32 crc kubenswrapper[5039]: I1124 14:10:32.746919 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xbkm2\" (UniqueName: \"kubernetes.io/projected/3ffbbfde-6e25-49e1-ab24-061d1e90c133-kube-api-access-xbkm2\") on node \"crc\" DevicePath \"\""
Nov 24 14:10:32 crc kubenswrapper[5039]: I1124 14:10:32.746953 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3ffbbfde-6e25-49e1-ab24-061d1e90c133-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 24 14:10:32 crc kubenswrapper[5039]: I1124 14:10:32.746969 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3ffbbfde-6e25-49e1-ab24-061d1e90c133-ceph\") on node \"crc\" DevicePath \"\""
Nov 24 14:10:32 crc kubenswrapper[5039]: I1124 14:10:32.746980 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3ffbbfde-6e25-49e1-ab24-061d1e90c133-inventory\") on node \"crc\" DevicePath \"\""
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.057712 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl" event={"ID":"3ffbbfde-6e25-49e1-ab24-061d1e90c133","Type":"ContainerDied","Data":"6f5abf32bc00c2a30f80dede7ee9fda4ef156d500eedb1c7e8e50bebeb2b565f"}
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.057758 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f5abf32bc00c2a30f80dede7ee9fda4ef156d500eedb1c7e8e50bebeb2b565f"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.057773 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.137615 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7"]
Nov 24 14:10:33 crc kubenswrapper[5039]: E1124 14:10:33.138212 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ffbbfde-6e25-49e1-ab24-061d1e90c133" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.138238 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ffbbfde-6e25-49e1-ab24-061d1e90c133" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.138547 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ffbbfde-6e25-49e1-ab24-061d1e90c133" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.139792 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.143769 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.144057 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.144120 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.144066 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.144365 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.151817 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7"]
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.153894 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f9911acb-6e34-497c-9346-18b3299f63be-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7\" (UID: \"f9911acb-6e34-497c-9346-18b3299f63be\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.153936 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f9911acb-6e34-497c-9346-18b3299f63be-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7\" (UID: \"f9911acb-6e34-497c-9346-18b3299f63be\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.153983 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f9911acb-6e34-497c-9346-18b3299f63be-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7\" (UID: \"f9911acb-6e34-497c-9346-18b3299f63be\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.154210 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fm25k\" (UniqueName: \"kubernetes.io/projected/f9911acb-6e34-497c-9346-18b3299f63be-kube-api-access-fm25k\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7\" (UID: \"f9911acb-6e34-497c-9346-18b3299f63be\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.256829 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f9911acb-6e34-497c-9346-18b3299f63be-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7\" (UID: \"f9911acb-6e34-497c-9346-18b3299f63be\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.256876 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f9911acb-6e34-497c-9346-18b3299f63be-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7\" (UID: \"f9911acb-6e34-497c-9346-18b3299f63be\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.256918 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f9911acb-6e34-497c-9346-18b3299f63be-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7\" (UID: \"f9911acb-6e34-497c-9346-18b3299f63be\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.256988 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fm25k\" (UniqueName: \"kubernetes.io/projected/f9911acb-6e34-497c-9346-18b3299f63be-kube-api-access-fm25k\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7\" (UID: \"f9911acb-6e34-497c-9346-18b3299f63be\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.260554 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f9911acb-6e34-497c-9346-18b3299f63be-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7\" (UID: \"f9911acb-6e34-497c-9346-18b3299f63be\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.262340 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f9911acb-6e34-497c-9346-18b3299f63be-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7\" (UID: \"f9911acb-6e34-497c-9346-18b3299f63be\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.269840 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f9911acb-6e34-497c-9346-18b3299f63be-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7\" (UID: \"f9911acb-6e34-497c-9346-18b3299f63be\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.273148 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fm25k\" (UniqueName: \"kubernetes.io/projected/f9911acb-6e34-497c-9346-18b3299f63be-kube-api-access-fm25k\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7\" (UID: \"f9911acb-6e34-497c-9346-18b3299f63be\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7"
Nov 24 14:10:33 crc kubenswrapper[5039]: I1124 14:10:33.457779 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7"
Nov 24 14:10:34 crc kubenswrapper[5039]: W1124 14:10:34.005358 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf9911acb_6e34_497c_9346_18b3299f63be.slice/crio-0d728e7a5900a2d0d942afa18a48d25da0e1a58a42e9da5f223513f5ff3f394f WatchSource:0}: Error finding container 0d728e7a5900a2d0d942afa18a48d25da0e1a58a42e9da5f223513f5ff3f394f: Status 404 returned error can't find the container with id 0d728e7a5900a2d0d942afa18a48d25da0e1a58a42e9da5f223513f5ff3f394f
Nov 24 14:10:34 crc kubenswrapper[5039]: I1124 14:10:34.005491 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7"]
Nov 24 14:10:34 crc kubenswrapper[5039]: I1124 14:10:34.066365 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7" event={"ID":"f9911acb-6e34-497c-9346-18b3299f63be","Type":"ContainerStarted","Data":"0d728e7a5900a2d0d942afa18a48d25da0e1a58a42e9da5f223513f5ff3f394f"}
Nov 24 14:10:35 crc kubenswrapper[5039]: I1124 14:10:35.105864 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7" event={"ID":"f9911acb-6e34-497c-9346-18b3299f63be","Type":"ContainerStarted","Data":"ffc075f8eb639ed9e83bb32d91c610e98bb56eb1e4a7b947d4b762a47fe3da11"}
Nov 24 14:10:35 crc kubenswrapper[5039]: I1124 14:10:35.131681 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7" podStartSLOduration=1.646767519 podStartE2EDuration="2.131656647s" podCreationTimestamp="2025-11-24 14:10:33 +0000 UTC" firstStartedPulling="2025-11-24 14:10:34.007951563 +0000 UTC m=+3146.447076063" lastFinishedPulling="2025-11-24 14:10:34.492840691 +0000 UTC m=+3146.931965191" observedRunningTime="2025-11-24 14:10:35.123185279 +0000 UTC m=+3147.562309799" watchObservedRunningTime="2025-11-24 14:10:35.131656647 +0000 UTC m=+3147.570781147"
Nov 24 14:10:37 crc kubenswrapper[5039]: I1124 14:10:37.306494 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36"
Nov 24 14:10:37 crc kubenswrapper[5039]: E1124 14:10:37.307314 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:10:41 crc kubenswrapper[5039]: I1124 14:10:41.160642 5039 generic.go:334] "Generic (PLEG): container finished" podID="f9911acb-6e34-497c-9346-18b3299f63be" containerID="ffc075f8eb639ed9e83bb32d91c610e98bb56eb1e4a7b947d4b762a47fe3da11" exitCode=0
Nov 24 14:10:41 crc kubenswrapper[5039]: I1124 14:10:41.160721 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7" event={"ID":"f9911acb-6e34-497c-9346-18b3299f63be","Type":"ContainerDied","Data":"ffc075f8eb639ed9e83bb32d91c610e98bb56eb1e4a7b947d4b762a47fe3da11"}
Nov 24 14:10:42 crc kubenswrapper[5039]: I1124 14:10:42.681570 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7"
Nov 24 14:10:42 crc kubenswrapper[5039]: I1124 14:10:42.751659 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f9911acb-6e34-497c-9346-18b3299f63be-ceph\") pod \"f9911acb-6e34-497c-9346-18b3299f63be\" (UID: \"f9911acb-6e34-497c-9346-18b3299f63be\") "
Nov 24 14:10:42 crc kubenswrapper[5039]: I1124 14:10:42.751807 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f9911acb-6e34-497c-9346-18b3299f63be-ssh-key\") pod \"f9911acb-6e34-497c-9346-18b3299f63be\" (UID: \"f9911acb-6e34-497c-9346-18b3299f63be\") "
Nov 24 14:10:42 crc kubenswrapper[5039]: I1124 14:10:42.751843 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f9911acb-6e34-497c-9346-18b3299f63be-inventory\") pod \"f9911acb-6e34-497c-9346-18b3299f63be\" (UID: \"f9911acb-6e34-497c-9346-18b3299f63be\") "
Nov 24 14:10:42 crc kubenswrapper[5039]: I1124 14:10:42.751965 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fm25k\" (UniqueName: \"kubernetes.io/projected/f9911acb-6e34-497c-9346-18b3299f63be-kube-api-access-fm25k\") pod \"f9911acb-6e34-497c-9346-18b3299f63be\" (UID: \"f9911acb-6e34-497c-9346-18b3299f63be\") "
Nov 24 14:10:42 crc kubenswrapper[5039]: I1124 14:10:42.760917 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9911acb-6e34-497c-9346-18b3299f63be-ceph" (OuterVolumeSpecName: "ceph") pod "f9911acb-6e34-497c-9346-18b3299f63be" (UID: "f9911acb-6e34-497c-9346-18b3299f63be"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:10:42 crc kubenswrapper[5039]: I1124 14:10:42.761031 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9911acb-6e34-497c-9346-18b3299f63be-kube-api-access-fm25k" (OuterVolumeSpecName: "kube-api-access-fm25k") pod "f9911acb-6e34-497c-9346-18b3299f63be" (UID: "f9911acb-6e34-497c-9346-18b3299f63be"). InnerVolumeSpecName "kube-api-access-fm25k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 14:10:42 crc kubenswrapper[5039]: I1124 14:10:42.784821 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9911acb-6e34-497c-9346-18b3299f63be-inventory" (OuterVolumeSpecName: "inventory") pod "f9911acb-6e34-497c-9346-18b3299f63be" (UID: "f9911acb-6e34-497c-9346-18b3299f63be"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:10:42 crc kubenswrapper[5039]: I1124 14:10:42.789989 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9911acb-6e34-497c-9346-18b3299f63be-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "f9911acb-6e34-497c-9346-18b3299f63be" (UID: "f9911acb-6e34-497c-9346-18b3299f63be"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:10:42 crc kubenswrapper[5039]: I1124 14:10:42.855045 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fm25k\" (UniqueName: \"kubernetes.io/projected/f9911acb-6e34-497c-9346-18b3299f63be-kube-api-access-fm25k\") on node \"crc\" DevicePath \"\""
Nov 24 14:10:42 crc kubenswrapper[5039]: I1124 14:10:42.855088 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f9911acb-6e34-497c-9346-18b3299f63be-ceph\") on node \"crc\" DevicePath \"\""
Nov 24 14:10:42 crc kubenswrapper[5039]: I1124 14:10:42.855105 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f9911acb-6e34-497c-9346-18b3299f63be-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 24 14:10:42 crc kubenswrapper[5039]: I1124 14:10:42.855116 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f9911acb-6e34-497c-9346-18b3299f63be-inventory\") on node \"crc\" DevicePath \"\""
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.182167 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7" event={"ID":"f9911acb-6e34-497c-9346-18b3299f63be","Type":"ContainerDied","Data":"0d728e7a5900a2d0d942afa18a48d25da0e1a58a42e9da5f223513f5ff3f394f"}
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.182220 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0d728e7a5900a2d0d942afa18a48d25da0e1a58a42e9da5f223513f5ff3f394f"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.182263 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.313230 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n"]
Nov 24 14:10:43 crc kubenswrapper[5039]: E1124 14:10:43.313606 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9911acb-6e34-497c-9346-18b3299f63be" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.313624 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9911acb-6e34-497c-9346-18b3299f63be" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.314650 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9911acb-6e34-497c-9346-18b3299f63be" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.316366 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.323489 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.323491 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.323624 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.323922 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.324530 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.349912 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n"]
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.365710 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6ae099f1-378b-4de8-a8aa-480a714ccbaf-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-w6l8n\" (UID: \"6ae099f1-378b-4de8-a8aa-480a714ccbaf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.365752 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6ae099f1-378b-4de8-a8aa-480a714ccbaf-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-w6l8n\" (UID: \"6ae099f1-378b-4de8-a8aa-480a714ccbaf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.365870 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvk7v\" (UniqueName: \"kubernetes.io/projected/6ae099f1-378b-4de8-a8aa-480a714ccbaf-kube-api-access-wvk7v\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-w6l8n\" (UID: \"6ae099f1-378b-4de8-a8aa-480a714ccbaf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.365940 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6ae099f1-378b-4de8-a8aa-480a714ccbaf-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-w6l8n\" (UID: \"6ae099f1-378b-4de8-a8aa-480a714ccbaf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.468815 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6ae099f1-378b-4de8-a8aa-480a714ccbaf-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-w6l8n\" (UID: \"6ae099f1-378b-4de8-a8aa-480a714ccbaf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.468997 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6ae099f1-378b-4de8-a8aa-480a714ccbaf-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-w6l8n\" (UID: \"6ae099f1-378b-4de8-a8aa-480a714ccbaf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.469021 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6ae099f1-378b-4de8-a8aa-480a714ccbaf-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-w6l8n\" (UID: \"6ae099f1-378b-4de8-a8aa-480a714ccbaf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.469093 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvk7v\" (UniqueName: \"kubernetes.io/projected/6ae099f1-378b-4de8-a8aa-480a714ccbaf-kube-api-access-wvk7v\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-w6l8n\" (UID: \"6ae099f1-378b-4de8-a8aa-480a714ccbaf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.474337 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6ae099f1-378b-4de8-a8aa-480a714ccbaf-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-w6l8n\" (UID: \"6ae099f1-378b-4de8-a8aa-480a714ccbaf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.474409 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6ae099f1-378b-4de8-a8aa-480a714ccbaf-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-w6l8n\" (UID: \"6ae099f1-378b-4de8-a8aa-480a714ccbaf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.475559 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6ae099f1-378b-4de8-a8aa-480a714ccbaf-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-w6l8n\" (UID: \"6ae099f1-378b-4de8-a8aa-480a714ccbaf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.490649 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvk7v\" (UniqueName: \"kubernetes.io/projected/6ae099f1-378b-4de8-a8aa-480a714ccbaf-kube-api-access-wvk7v\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-w6l8n\" (UID: \"6ae099f1-378b-4de8-a8aa-480a714ccbaf\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n"
Nov 24 14:10:43 crc kubenswrapper[5039]: I1124 14:10:43.645249 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n"
Nov 24 14:10:44 crc kubenswrapper[5039]: I1124 14:10:44.011348 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n"]
Nov 24 14:10:44 crc kubenswrapper[5039]: I1124 14:10:44.195282 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n" event={"ID":"6ae099f1-378b-4de8-a8aa-480a714ccbaf","Type":"ContainerStarted","Data":"afcdbc604ff491a8846089e51a1d0793a3b157a2bb2d9e0032d7e84f47dc1732"}
Nov 24 14:10:45 crc kubenswrapper[5039]: I1124 14:10:45.206432 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n" event={"ID":"6ae099f1-378b-4de8-a8aa-480a714ccbaf","Type":"ContainerStarted","Data":"6e43543c8e5572427e85e74fb5a9e5b45bc3a80aa42fceb1f10d2d442f8b0bb6"}
Nov 24 14:10:45 crc kubenswrapper[5039]: I1124 14:10:45.232255 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n" podStartSLOduration=1.7770568949999999 podStartE2EDuration="2.232230906s" podCreationTimestamp="2025-11-24 14:10:43 +0000 UTC" firstStartedPulling="2025-11-24 14:10:44.010569855 +0000 UTC m=+3156.449694365" lastFinishedPulling="2025-11-24 14:10:44.465743836 +0000 UTC m=+3156.904868376" observedRunningTime="2025-11-24 14:10:45.231665873 +0000 UTC m=+3157.670790423" watchObservedRunningTime="2025-11-24 14:10:45.232230906 +0000 UTC m=+3157.671355416"
Nov 24 14:10:51 crc kubenswrapper[5039]: I1124 14:10:51.307470 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36"
Nov 24 14:10:51 crc kubenswrapper[5039]: E1124 14:10:51.309038 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:11:05 crc kubenswrapper[5039]: I1124 14:11:05.308860 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36"
Nov 24 14:11:05 crc kubenswrapper[5039]: E1124 14:11:05.311315 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:11:19 crc kubenswrapper[5039]: I1124 14:11:19.307394 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36"
Nov 24 14:11:19 crc kubenswrapper[5039]: E1124 14:11:19.308334 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:11:30 crc kubenswrapper[5039]: I1124 14:11:30.307349 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36"
Nov 24 14:11:30 crc kubenswrapper[5039]: I1124 14:11:30.741412 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"1622d9e91171952ff1f7ba0ab24928decb597098c11dc9185b79112e31ea3dec"}
Nov 24 14:11:30 crc kubenswrapper[5039]: I1124 14:11:30.745113 5039 generic.go:334] "Generic (PLEG): container finished" podID="6ae099f1-378b-4de8-a8aa-480a714ccbaf" containerID="6e43543c8e5572427e85e74fb5a9e5b45bc3a80aa42fceb1f10d2d442f8b0bb6" exitCode=0
Nov 24 14:11:30 crc kubenswrapper[5039]: I1124 14:11:30.745205 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n" event={"ID":"6ae099f1-378b-4de8-a8aa-480a714ccbaf","Type":"ContainerDied","Data":"6e43543c8e5572427e85e74fb5a9e5b45bc3a80aa42fceb1f10d2d442f8b0bb6"}
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.295657 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n"
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.379242 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6ae099f1-378b-4de8-a8aa-480a714ccbaf-inventory\") pod \"6ae099f1-378b-4de8-a8aa-480a714ccbaf\" (UID: \"6ae099f1-378b-4de8-a8aa-480a714ccbaf\") "
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.379292 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6ae099f1-378b-4de8-a8aa-480a714ccbaf-ssh-key\") pod \"6ae099f1-378b-4de8-a8aa-480a714ccbaf\" (UID: \"6ae099f1-378b-4de8-a8aa-480a714ccbaf\") "
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.379393 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6ae099f1-378b-4de8-a8aa-480a714ccbaf-ceph\") pod \"6ae099f1-378b-4de8-a8aa-480a714ccbaf\" (UID: \"6ae099f1-378b-4de8-a8aa-480a714ccbaf\") "
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.379523 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wvk7v\" (UniqueName: \"kubernetes.io/projected/6ae099f1-378b-4de8-a8aa-480a714ccbaf-kube-api-access-wvk7v\") pod \"6ae099f1-378b-4de8-a8aa-480a714ccbaf\" (UID: \"6ae099f1-378b-4de8-a8aa-480a714ccbaf\") "
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.385080 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ae099f1-378b-4de8-a8aa-480a714ccbaf-kube-api-access-wvk7v" (OuterVolumeSpecName: "kube-api-access-wvk7v") pod "6ae099f1-378b-4de8-a8aa-480a714ccbaf" (UID: "6ae099f1-378b-4de8-a8aa-480a714ccbaf"). InnerVolumeSpecName "kube-api-access-wvk7v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.385168 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ae099f1-378b-4de8-a8aa-480a714ccbaf-ceph" (OuterVolumeSpecName: "ceph") pod "6ae099f1-378b-4de8-a8aa-480a714ccbaf" (UID: "6ae099f1-378b-4de8-a8aa-480a714ccbaf"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.408434 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ae099f1-378b-4de8-a8aa-480a714ccbaf-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6ae099f1-378b-4de8-a8aa-480a714ccbaf" (UID: "6ae099f1-378b-4de8-a8aa-480a714ccbaf"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.413165 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ae099f1-378b-4de8-a8aa-480a714ccbaf-inventory" (OuterVolumeSpecName: "inventory") pod "6ae099f1-378b-4de8-a8aa-480a714ccbaf" (UID: "6ae099f1-378b-4de8-a8aa-480a714ccbaf"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.482052 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6ae099f1-378b-4de8-a8aa-480a714ccbaf-inventory\") on node \"crc\" DevicePath \"\""
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.482126 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6ae099f1-378b-4de8-a8aa-480a714ccbaf-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.482135 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6ae099f1-378b-4de8-a8aa-480a714ccbaf-ceph\") on node \"crc\" DevicePath \"\""
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.482144 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wvk7v\" (UniqueName: \"kubernetes.io/projected/6ae099f1-378b-4de8-a8aa-480a714ccbaf-kube-api-access-wvk7v\") on node \"crc\" DevicePath \"\""
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.769791 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n" event={"ID":"6ae099f1-378b-4de8-a8aa-480a714ccbaf","Type":"ContainerDied","Data":"afcdbc604ff491a8846089e51a1d0793a3b157a2bb2d9e0032d7e84f47dc1732"}
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.769831 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="afcdbc604ff491a8846089e51a1d0793a3b157a2bb2d9e0032d7e84f47dc1732"
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.769904 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-w6l8n"
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.874809 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8"]
Nov 24 14:11:32 crc kubenswrapper[5039]: E1124 14:11:32.875334 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ae099f1-378b-4de8-a8aa-480a714ccbaf" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.875353 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ae099f1-378b-4de8-a8aa-480a714ccbaf" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.875713 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ae099f1-378b-4de8-a8aa-480a714ccbaf" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.876418 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8"
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.879232 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.879426 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.879697 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.879814 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files"
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.879962 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5"
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.884872 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8"]
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.991250 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8\" (UID: \"a735f69f-6248-4a8a-aeed-cfb50b81c9cb\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8"
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.991334 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8\" (UID: \"a735f69f-6248-4a8a-aeed-cfb50b81c9cb\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8"
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.991437 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8\" (UID: \"a735f69f-6248-4a8a-aeed-cfb50b81c9cb\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8"
Nov 24 14:11:32 crc kubenswrapper[5039]: I1124 14:11:32.991474 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcxd2\" (UniqueName: \"kubernetes.io/projected/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-kube-api-access-zcxd2\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8\" (UID: \"a735f69f-6248-4a8a-aeed-cfb50b81c9cb\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8"
Nov 24 14:11:33 crc kubenswrapper[5039]: I1124 14:11:33.093352 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8\" (UID: \"a735f69f-6248-4a8a-aeed-cfb50b81c9cb\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8"
Nov 24 14:11:33 crc kubenswrapper[5039]: I1124 14:11:33.093794 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8\" (UID: \"a735f69f-6248-4a8a-aeed-cfb50b81c9cb\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8"
Nov 24 14:11:33 crc kubenswrapper[5039]: I1124 14:11:33.093947 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8\" (UID: \"a735f69f-6248-4a8a-aeed-cfb50b81c9cb\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8"
Nov 24 14:11:33 crc kubenswrapper[5039]: I1124 14:11:33.094030 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcxd2\" (UniqueName: \"kubernetes.io/projected/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-kube-api-access-zcxd2\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8\" (UID: \"a735f69f-6248-4a8a-aeed-cfb50b81c9cb\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8"
Nov 24 14:11:33 crc kubenswrapper[5039]: I1124 14:11:33.099373 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8\" (UID: \"a735f69f-6248-4a8a-aeed-cfb50b81c9cb\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8"
Nov 24 14:11:33 crc kubenswrapper[5039]: I1124 14:11:33.099710 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8\" (UID: \"a735f69f-6248-4a8a-aeed-cfb50b81c9cb\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8"
Nov 24 14:11:33 crc kubenswrapper[5039]: I1124 14:11:33.108269 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8\" (UID: \"a735f69f-6248-4a8a-aeed-cfb50b81c9cb\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8"
Nov 24 14:11:33 crc kubenswrapper[5039]: I1124 14:11:33.121251 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcxd2\" (UniqueName: \"kubernetes.io/projected/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-kube-api-access-zcxd2\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8\" (UID: \"a735f69f-6248-4a8a-aeed-cfb50b81c9cb\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8"
Nov 24 14:11:33 crc kubenswrapper[5039]: I1124 14:11:33.199873 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8"
Nov 24 14:11:33 crc kubenswrapper[5039]: I1124 14:11:33.724741 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8"]
Nov 24 14:11:33 crc kubenswrapper[5039]: W1124 14:11:33.729161 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda735f69f_6248_4a8a_aeed_cfb50b81c9cb.slice/crio-b0c4b23ea1551336514b87fe5c89c527fbe1510e29e95564101f52ddce17e7ca WatchSource:0}: Error finding container b0c4b23ea1551336514b87fe5c89c527fbe1510e29e95564101f52ddce17e7ca: Status 404 returned error can't find the container with id b0c4b23ea1551336514b87fe5c89c527fbe1510e29e95564101f52ddce17e7ca
Nov 24 14:11:33 crc kubenswrapper[5039]: I1124 14:11:33.779339 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8" event={"ID":"a735f69f-6248-4a8a-aeed-cfb50b81c9cb","Type":"ContainerStarted","Data":"b0c4b23ea1551336514b87fe5c89c527fbe1510e29e95564101f52ddce17e7ca"}
Nov 24 14:11:34 crc kubenswrapper[5039]: I1124 14:11:34.790290 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8" event={"ID":"a735f69f-6248-4a8a-aeed-cfb50b81c9cb","Type":"ContainerStarted","Data":"3a0118c454a6ab9ab3554cbf23c6b32d2aa9226712cdeeee25cf6530c2fdd9d7"}
Nov 24 14:11:34 crc kubenswrapper[5039]: I1124 14:11:34.818079 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8" podStartSLOduration=2.32260636 podStartE2EDuration="2.818054606s" podCreationTimestamp="2025-11-24 14:11:32 +0000 UTC" firstStartedPulling="2025-11-24 14:11:33.732128487 +0000 UTC m=+3206.171252987" lastFinishedPulling="2025-11-24 14:11:34.227576723 +0000 UTC m=+3206.666701233" observedRunningTime="2025-11-24 14:11:34.808146344 +0000 UTC m=+3207.247270834" watchObservedRunningTime="2025-11-24 14:11:34.818054606 +0000 UTC m=+3207.257179106"
Nov 24 14:11:39 crc kubenswrapper[5039]: I1124 14:11:39.838252 5039 generic.go:334] "Generic (PLEG): container finished" podID="a735f69f-6248-4a8a-aeed-cfb50b81c9cb" containerID="3a0118c454a6ab9ab3554cbf23c6b32d2aa9226712cdeeee25cf6530c2fdd9d7" exitCode=0
Nov 24 14:11:39 crc kubenswrapper[5039]: I1124 14:11:39.838342 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8" event={"ID":"a735f69f-6248-4a8a-aeed-cfb50b81c9cb","Type":"ContainerDied","Data":"3a0118c454a6ab9ab3554cbf23c6b32d2aa9226712cdeeee25cf6530c2fdd9d7"}
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.367768 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8"
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.479993 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-inventory\") pod \"a735f69f-6248-4a8a-aeed-cfb50b81c9cb\" (UID: \"a735f69f-6248-4a8a-aeed-cfb50b81c9cb\") "
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.480071 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-ssh-key\") pod \"a735f69f-6248-4a8a-aeed-cfb50b81c9cb\" (UID: \"a735f69f-6248-4a8a-aeed-cfb50b81c9cb\") "
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.480280 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-ceph\") pod \"a735f69f-6248-4a8a-aeed-cfb50b81c9cb\" (UID: \"a735f69f-6248-4a8a-aeed-cfb50b81c9cb\") "
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.480366 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zcxd2\" (UniqueName: \"kubernetes.io/projected/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-kube-api-access-zcxd2\") pod \"a735f69f-6248-4a8a-aeed-cfb50b81c9cb\" (UID: \"a735f69f-6248-4a8a-aeed-cfb50b81c9cb\") "
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.487267 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-kube-api-access-zcxd2" (OuterVolumeSpecName: "kube-api-access-zcxd2") pod "a735f69f-6248-4a8a-aeed-cfb50b81c9cb" (UID: "a735f69f-6248-4a8a-aeed-cfb50b81c9cb"). InnerVolumeSpecName "kube-api-access-zcxd2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.493882 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-ceph" (OuterVolumeSpecName: "ceph") pod "a735f69f-6248-4a8a-aeed-cfb50b81c9cb" (UID: "a735f69f-6248-4a8a-aeed-cfb50b81c9cb"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.526488 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a735f69f-6248-4a8a-aeed-cfb50b81c9cb" (UID: "a735f69f-6248-4a8a-aeed-cfb50b81c9cb"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.541285 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-inventory" (OuterVolumeSpecName: "inventory") pod "a735f69f-6248-4a8a-aeed-cfb50b81c9cb" (UID: "a735f69f-6248-4a8a-aeed-cfb50b81c9cb"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.584058 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-ceph\") on node \"crc\" DevicePath \"\""
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.584442 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zcxd2\" (UniqueName: \"kubernetes.io/projected/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-kube-api-access-zcxd2\") on node \"crc\" DevicePath \"\""
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.584463 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-inventory\") on node \"crc\" DevicePath \"\""
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.584476 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a735f69f-6248-4a8a-aeed-cfb50b81c9cb-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.859937 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8" event={"ID":"a735f69f-6248-4a8a-aeed-cfb50b81c9cb","Type":"ContainerDied","Data":"b0c4b23ea1551336514b87fe5c89c527fbe1510e29e95564101f52ddce17e7ca"}
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.859983 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8"
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.859991 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0c4b23ea1551336514b87fe5c89c527fbe1510e29e95564101f52ddce17e7ca"
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.963870 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5"]
Nov 24 14:11:41 crc kubenswrapper[5039]: E1124 14:11:41.964602 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a735f69f-6248-4a8a-aeed-cfb50b81c9cb" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam"
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.964687 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="a735f69f-6248-4a8a-aeed-cfb50b81c9cb" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam"
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.965010 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="a735f69f-6248-4a8a-aeed-cfb50b81c9cb" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam"
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.966004 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5"
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.968719 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.968988 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.969231 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5"
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.970159 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.973795 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files"
Nov 24 14:11:41 crc kubenswrapper[5039]: I1124 14:11:41.995988 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5"]
Nov 24 14:11:42 crc kubenswrapper[5039]: I1124 14:11:42.095086 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5\" (UID: \"d6370bcf-3557-4e56-9c7b-670a2ec77ec0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5"
Nov 24 14:11:42 crc kubenswrapper[5039]: I1124 14:11:42.095219 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5\" (UID: \"d6370bcf-3557-4e56-9c7b-670a2ec77ec0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5"
Nov 24 14:11:42 crc kubenswrapper[5039]: I1124 14:11:42.095388 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cj9t2\" (UniqueName: \"kubernetes.io/projected/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-kube-api-access-cj9t2\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5\" (UID: \"d6370bcf-3557-4e56-9c7b-670a2ec77ec0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5"
Nov 24 14:11:42 crc kubenswrapper[5039]: I1124 14:11:42.095716 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5\" (UID: \"d6370bcf-3557-4e56-9c7b-670a2ec77ec0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5"
Nov 24 14:11:42 crc kubenswrapper[5039]: I1124 14:11:42.197779 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5\" (UID: \"d6370bcf-3557-4e56-9c7b-670a2ec77ec0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5"
Nov 24 14:11:42 crc kubenswrapper[5039]: I1124 14:11:42.197930 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5\" (UID: \"d6370bcf-3557-4e56-9c7b-670a2ec77ec0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5"
Nov 24 14:11:42 crc kubenswrapper[5039]: I1124 14:11:42.198080 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cj9t2\" (UniqueName: \"kubernetes.io/projected/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-kube-api-access-cj9t2\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5\" (UID: \"d6370bcf-3557-4e56-9c7b-670a2ec77ec0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5"
Nov 24 14:11:42 crc kubenswrapper[5039]: I1124 14:11:42.198229 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5\" (UID: \"d6370bcf-3557-4e56-9c7b-670a2ec77ec0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5"
Nov 24 14:11:42 crc kubenswrapper[5039]: I1124 14:11:42.203431 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5\" (UID: \"d6370bcf-3557-4e56-9c7b-670a2ec77ec0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5"
Nov 24 14:11:42 crc kubenswrapper[5039]: I1124 14:11:42.203528 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5\" (UID: \"d6370bcf-3557-4e56-9c7b-670a2ec77ec0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5"
Nov 24 14:11:42 crc kubenswrapper[5039]: I1124 14:11:42.203431 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5\" (UID: \"d6370bcf-3557-4e56-9c7b-670a2ec77ec0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5"
Nov 24 14:11:42 crc kubenswrapper[5039]: I1124 14:11:42.229123 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cj9t2\" (UniqueName: \"kubernetes.io/projected/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-kube-api-access-cj9t2\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5\" (UID: \"d6370bcf-3557-4e56-9c7b-670a2ec77ec0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5"
Nov 24 14:11:42 crc kubenswrapper[5039]: I1124 14:11:42.302169 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5"
Nov 24 14:11:42 crc kubenswrapper[5039]: I1124 14:11:42.992353 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5"]
Nov 24 14:11:43 crc kubenswrapper[5039]: I1124 14:11:43.880595 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5" event={"ID":"d6370bcf-3557-4e56-9c7b-670a2ec77ec0","Type":"ContainerStarted","Data":"ef834cb3ecbb20f6107f78201213141a591f9eaa481d49fc2393e2cf26b3ebac"}
Nov 24 14:11:43 crc kubenswrapper[5039]: I1124 14:11:43.881371 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5" event={"ID":"d6370bcf-3557-4e56-9c7b-670a2ec77ec0","Type":"ContainerStarted","Data":"eafa232319524126e6b8e37d140a5ba6c67963b8773fce4ea2d7d854ce076f8a"}
Nov 24 14:11:43 crc kubenswrapper[5039]: I1124 14:11:43.903839 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5" podStartSLOduration=2.4644252 podStartE2EDuration="2.903819045s" podCreationTimestamp="2025-11-24 14:11:41 +0000 UTC" firstStartedPulling="2025-11-24 14:11:42.984612327 +0000 UTC m=+3215.423736827" lastFinishedPulling="2025-11-24 14:11:43.424006172 +0000 UTC m=+3215.863130672" observedRunningTime="2025-11-24 14:11:43.897740917 +0000 UTC m=+3216.336865417" watchObservedRunningTime="2025-11-24 14:11:43.903819045 +0000 UTC m=+3216.342943545"
Nov 24 14:12:41 crc kubenswrapper[5039]: I1124 14:12:41.498703 5039 generic.go:334] "Generic (PLEG): container finished" podID="d6370bcf-3557-4e56-9c7b-670a2ec77ec0" containerID="ef834cb3ecbb20f6107f78201213141a591f9eaa481d49fc2393e2cf26b3ebac" exitCode=0
Nov 24 14:12:41 crc kubenswrapper[5039]: I1124 14:12:41.498778 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5" event={"ID":"d6370bcf-3557-4e56-9c7b-670a2ec77ec0","Type":"ContainerDied","Data":"ef834cb3ecbb20f6107f78201213141a591f9eaa481d49fc2393e2cf26b3ebac"}
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.011414 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5"
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.187695 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-inventory\") pod \"d6370bcf-3557-4e56-9c7b-670a2ec77ec0\" (UID: \"d6370bcf-3557-4e56-9c7b-670a2ec77ec0\") "
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.187871 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-ceph\") pod \"d6370bcf-3557-4e56-9c7b-670a2ec77ec0\" (UID: \"d6370bcf-3557-4e56-9c7b-670a2ec77ec0\") "
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.189247 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cj9t2\" (UniqueName: \"kubernetes.io/projected/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-kube-api-access-cj9t2\") pod \"d6370bcf-3557-4e56-9c7b-670a2ec77ec0\" (UID: \"d6370bcf-3557-4e56-9c7b-670a2ec77ec0\") "
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.190001 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-ssh-key\") pod \"d6370bcf-3557-4e56-9c7b-670a2ec77ec0\" (UID: \"d6370bcf-3557-4e56-9c7b-670a2ec77ec0\") "
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.195726 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-kube-api-access-cj9t2" (OuterVolumeSpecName: "kube-api-access-cj9t2") pod "d6370bcf-3557-4e56-9c7b-670a2ec77ec0" (UID: "d6370bcf-3557-4e56-9c7b-670a2ec77ec0"). InnerVolumeSpecName "kube-api-access-cj9t2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.196472 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-ceph" (OuterVolumeSpecName: "ceph") pod "d6370bcf-3557-4e56-9c7b-670a2ec77ec0" (UID: "d6370bcf-3557-4e56-9c7b-670a2ec77ec0"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.225712 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-inventory" (OuterVolumeSpecName: "inventory") pod "d6370bcf-3557-4e56-9c7b-670a2ec77ec0" (UID: "d6370bcf-3557-4e56-9c7b-670a2ec77ec0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.228313 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d6370bcf-3557-4e56-9c7b-670a2ec77ec0" (UID: "d6370bcf-3557-4e56-9c7b-670a2ec77ec0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.294428 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-inventory\") on node \"crc\" DevicePath \"\""
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.294481 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-ceph\") on node \"crc\" DevicePath \"\""
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.294536 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cj9t2\" (UniqueName: \"kubernetes.io/projected/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-kube-api-access-cj9t2\") on node \"crc\" DevicePath \"\""
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.294559 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d6370bcf-3557-4e56-9c7b-670a2ec77ec0-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.525033 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5" event={"ID":"d6370bcf-3557-4e56-9c7b-670a2ec77ec0","Type":"ContainerDied","Data":"eafa232319524126e6b8e37d140a5ba6c67963b8773fce4ea2d7d854ce076f8a"}
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.525084 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eafa232319524126e6b8e37d140a5ba6c67963b8773fce4ea2d7d854ce076f8a"
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.525170 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5"
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.623780 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-bqd9r"]
Nov 24 14:12:43 crc kubenswrapper[5039]: E1124 14:12:43.624662 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6370bcf-3557-4e56-9c7b-670a2ec77ec0" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.624681 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6370bcf-3557-4e56-9c7b-670a2ec77ec0" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.624997 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6370bcf-3557-4e56-9c7b-670a2ec77ec0" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.625930 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-bqd9r"
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.629190 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files"
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.629396 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5"
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.629674 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.629674 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.630203 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.633145 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-bqd9r"]
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.805781 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wck2h\" (UniqueName: \"kubernetes.io/projected/20f1c3b8-744c-45ab-b82c-07fab5659614-kube-api-access-wck2h\") pod \"ssh-known-hosts-edpm-deployment-bqd9r\" (UID: \"20f1c3b8-744c-45ab-b82c-07fab5659614\") " pod="openstack/ssh-known-hosts-edpm-deployment-bqd9r"
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.805974 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/20f1c3b8-744c-45ab-b82c-07fab5659614-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-bqd9r\" (UID: \"20f1c3b8-744c-45ab-b82c-07fab5659614\") " pod="openstack/ssh-known-hosts-edpm-deployment-bqd9r"
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.806074 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/20f1c3b8-744c-45ab-b82c-07fab5659614-ceph\") pod \"ssh-known-hosts-edpm-deployment-bqd9r\" (UID: \"20f1c3b8-744c-45ab-b82c-07fab5659614\") " pod="openstack/ssh-known-hosts-edpm-deployment-bqd9r"
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.806092 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/20f1c3b8-744c-45ab-b82c-07fab5659614-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-bqd9r\" (UID: \"20f1c3b8-744c-45ab-b82c-07fab5659614\") " pod="openstack/ssh-known-hosts-edpm-deployment-bqd9r"
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.908068 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/20f1c3b8-744c-45ab-b82c-07fab5659614-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-bqd9r\" (UID: \"20f1c3b8-744c-45ab-b82c-07fab5659614\") " pod="openstack/ssh-known-hosts-edpm-deployment-bqd9r"
Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.908177 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/20f1c3b8-744c-45ab-b82c-07fab5659614-ceph\") pod \"ssh-known-hosts-edpm-deployment-bqd9r\"
(UID: \"20f1c3b8-744c-45ab-b82c-07fab5659614\") " pod="openstack/ssh-known-hosts-edpm-deployment-bqd9r" Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.908206 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/20f1c3b8-744c-45ab-b82c-07fab5659614-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-bqd9r\" (UID: \"20f1c3b8-744c-45ab-b82c-07fab5659614\") " pod="openstack/ssh-known-hosts-edpm-deployment-bqd9r" Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.908284 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wck2h\" (UniqueName: \"kubernetes.io/projected/20f1c3b8-744c-45ab-b82c-07fab5659614-kube-api-access-wck2h\") pod \"ssh-known-hosts-edpm-deployment-bqd9r\" (UID: \"20f1c3b8-744c-45ab-b82c-07fab5659614\") " pod="openstack/ssh-known-hosts-edpm-deployment-bqd9r" Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.912921 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/20f1c3b8-744c-45ab-b82c-07fab5659614-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-bqd9r\" (UID: \"20f1c3b8-744c-45ab-b82c-07fab5659614\") " pod="openstack/ssh-known-hosts-edpm-deployment-bqd9r" Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.914024 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/20f1c3b8-744c-45ab-b82c-07fab5659614-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-bqd9r\" (UID: \"20f1c3b8-744c-45ab-b82c-07fab5659614\") " pod="openstack/ssh-known-hosts-edpm-deployment-bqd9r" Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.915189 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/20f1c3b8-744c-45ab-b82c-07fab5659614-ceph\") pod \"ssh-known-hosts-edpm-deployment-bqd9r\" (UID: \"20f1c3b8-744c-45ab-b82c-07fab5659614\") " pod="openstack/ssh-known-hosts-edpm-deployment-bqd9r" Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.927387 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wck2h\" (UniqueName: \"kubernetes.io/projected/20f1c3b8-744c-45ab-b82c-07fab5659614-kube-api-access-wck2h\") pod \"ssh-known-hosts-edpm-deployment-bqd9r\" (UID: \"20f1c3b8-744c-45ab-b82c-07fab5659614\") " pod="openstack/ssh-known-hosts-edpm-deployment-bqd9r" Nov 24 14:12:43 crc kubenswrapper[5039]: I1124 14:12:43.951794 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-bqd9r" Nov 24 14:12:44 crc kubenswrapper[5039]: I1124 14:12:44.517640 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-bqd9r"] Nov 24 14:12:45 crc kubenswrapper[5039]: I1124 14:12:45.545076 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-bqd9r" event={"ID":"20f1c3b8-744c-45ab-b82c-07fab5659614","Type":"ContainerStarted","Data":"0bbca52873bcc1cd35f40c49f34919f080c2e6cba963c3cc6579b06ad5f1a71e"} Nov 24 14:12:45 crc kubenswrapper[5039]: I1124 14:12:45.545722 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-bqd9r" event={"ID":"20f1c3b8-744c-45ab-b82c-07fab5659614","Type":"ContainerStarted","Data":"870e7c6d86be78f888e1b18ddbcc1cabf91e02f3c15b302a74834b3609757b0e"} Nov 24 14:12:57 crc kubenswrapper[5039]: I1124 14:12:57.691639 5039 generic.go:334] "Generic (PLEG): container finished" podID="20f1c3b8-744c-45ab-b82c-07fab5659614" containerID="0bbca52873bcc1cd35f40c49f34919f080c2e6cba963c3cc6579b06ad5f1a71e" exitCode=0 Nov 24 14:12:57 crc kubenswrapper[5039]: I1124 14:12:57.691795 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-bqd9r" event={"ID":"20f1c3b8-744c-45ab-b82c-07fab5659614","Type":"ContainerDied","Data":"0bbca52873bcc1cd35f40c49f34919f080c2e6cba963c3cc6579b06ad5f1a71e"} Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.177984 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-bqd9r" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.267427 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/20f1c3b8-744c-45ab-b82c-07fab5659614-inventory-0\") pod \"20f1c3b8-744c-45ab-b82c-07fab5659614\" (UID: \"20f1c3b8-744c-45ab-b82c-07fab5659614\") " Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.267578 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/20f1c3b8-744c-45ab-b82c-07fab5659614-ssh-key-openstack-edpm-ipam\") pod \"20f1c3b8-744c-45ab-b82c-07fab5659614\" (UID: \"20f1c3b8-744c-45ab-b82c-07fab5659614\") " Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.267627 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wck2h\" (UniqueName: \"kubernetes.io/projected/20f1c3b8-744c-45ab-b82c-07fab5659614-kube-api-access-wck2h\") pod \"20f1c3b8-744c-45ab-b82c-07fab5659614\" (UID: \"20f1c3b8-744c-45ab-b82c-07fab5659614\") " Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.267693 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/20f1c3b8-744c-45ab-b82c-07fab5659614-ceph\") pod \"20f1c3b8-744c-45ab-b82c-07fab5659614\" (UID: \"20f1c3b8-744c-45ab-b82c-07fab5659614\") " Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.272733 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20f1c3b8-744c-45ab-b82c-07fab5659614-ceph" (OuterVolumeSpecName: "ceph") pod "20f1c3b8-744c-45ab-b82c-07fab5659614" (UID: "20f1c3b8-744c-45ab-b82c-07fab5659614"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.272833 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20f1c3b8-744c-45ab-b82c-07fab5659614-kube-api-access-wck2h" (OuterVolumeSpecName: "kube-api-access-wck2h") pod "20f1c3b8-744c-45ab-b82c-07fab5659614" (UID: "20f1c3b8-744c-45ab-b82c-07fab5659614"). InnerVolumeSpecName "kube-api-access-wck2h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.296668 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20f1c3b8-744c-45ab-b82c-07fab5659614-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "20f1c3b8-744c-45ab-b82c-07fab5659614" (UID: "20f1c3b8-744c-45ab-b82c-07fab5659614"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.299297 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20f1c3b8-744c-45ab-b82c-07fab5659614-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "20f1c3b8-744c-45ab-b82c-07fab5659614" (UID: "20f1c3b8-744c-45ab-b82c-07fab5659614"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.370638 5039 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/20f1c3b8-744c-45ab-b82c-07fab5659614-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.370676 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/20f1c3b8-744c-45ab-b82c-07fab5659614-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.370692 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wck2h\" (UniqueName: \"kubernetes.io/projected/20f1c3b8-744c-45ab-b82c-07fab5659614-kube-api-access-wck2h\") on node \"crc\" DevicePath \"\"" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.370704 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/20f1c3b8-744c-45ab-b82c-07fab5659614-ceph\") on node \"crc\" DevicePath \"\"" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.712984 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-bqd9r" event={"ID":"20f1c3b8-744c-45ab-b82c-07fab5659614","Type":"ContainerDied","Data":"870e7c6d86be78f888e1b18ddbcc1cabf91e02f3c15b302a74834b3609757b0e"} Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.713030 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="870e7c6d86be78f888e1b18ddbcc1cabf91e02f3c15b302a74834b3609757b0e" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.713055 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-bqd9r" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.799125 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls"] Nov 24 14:12:59 crc kubenswrapper[5039]: E1124 14:12:59.799717 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20f1c3b8-744c-45ab-b82c-07fab5659614" containerName="ssh-known-hosts-edpm-deployment" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.799741 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="20f1c3b8-744c-45ab-b82c-07fab5659614" containerName="ssh-known-hosts-edpm-deployment" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.799950 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="20f1c3b8-744c-45ab-b82c-07fab5659614" containerName="ssh-known-hosts-edpm-deployment" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.800814 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.803019 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.803257 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.806676 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.806937 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.809148 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.817964 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls"] Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.982320 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7ef0c61d-e6e4-49d2-949c-ed412b59186f-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-hnwls\" (UID: \"7ef0c61d-e6e4-49d2-949c-ed412b59186f\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.982404 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqt6f\" (UniqueName: \"kubernetes.io/projected/7ef0c61d-e6e4-49d2-949c-ed412b59186f-kube-api-access-vqt6f\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-hnwls\" (UID: \"7ef0c61d-e6e4-49d2-949c-ed412b59186f\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 14:12:59.982453 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7ef0c61d-e6e4-49d2-949c-ed412b59186f-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-hnwls\" (UID: \"7ef0c61d-e6e4-49d2-949c-ed412b59186f\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" Nov 24 14:12:59 crc kubenswrapper[5039]: I1124 
14:12:59.982543 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7ef0c61d-e6e4-49d2-949c-ed412b59186f-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-hnwls\" (UID: \"7ef0c61d-e6e4-49d2-949c-ed412b59186f\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" Nov 24 14:13:00 crc kubenswrapper[5039]: I1124 14:13:00.084449 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7ef0c61d-e6e4-49d2-949c-ed412b59186f-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-hnwls\" (UID: \"7ef0c61d-e6e4-49d2-949c-ed412b59186f\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" Nov 24 14:13:00 crc kubenswrapper[5039]: I1124 14:13:00.084589 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqt6f\" (UniqueName: \"kubernetes.io/projected/7ef0c61d-e6e4-49d2-949c-ed412b59186f-kube-api-access-vqt6f\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-hnwls\" (UID: \"7ef0c61d-e6e4-49d2-949c-ed412b59186f\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" Nov 24 14:13:00 crc kubenswrapper[5039]: I1124 14:13:00.084629 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7ef0c61d-e6e4-49d2-949c-ed412b59186f-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-hnwls\" (UID: \"7ef0c61d-e6e4-49d2-949c-ed412b59186f\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" Nov 24 14:13:00 crc kubenswrapper[5039]: I1124 14:13:00.084669 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7ef0c61d-e6e4-49d2-949c-ed412b59186f-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-hnwls\" (UID: \"7ef0c61d-e6e4-49d2-949c-ed412b59186f\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" Nov 24 14:13:00 crc kubenswrapper[5039]: I1124 14:13:00.092062 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7ef0c61d-e6e4-49d2-949c-ed412b59186f-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-hnwls\" (UID: \"7ef0c61d-e6e4-49d2-949c-ed412b59186f\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" Nov 24 14:13:00 crc kubenswrapper[5039]: I1124 14:13:00.094005 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7ef0c61d-e6e4-49d2-949c-ed412b59186f-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-hnwls\" (UID: \"7ef0c61d-e6e4-49d2-949c-ed412b59186f\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" Nov 24 14:13:00 crc kubenswrapper[5039]: I1124 14:13:00.097723 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7ef0c61d-e6e4-49d2-949c-ed412b59186f-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-hnwls\" (UID: \"7ef0c61d-e6e4-49d2-949c-ed412b59186f\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" Nov 24 14:13:00 crc kubenswrapper[5039]: I1124 14:13:00.101818 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqt6f\" (UniqueName: \"kubernetes.io/projected/7ef0c61d-e6e4-49d2-949c-ed412b59186f-kube-api-access-vqt6f\") pod 
\"run-os-edpm-deployment-openstack-edpm-ipam-hnwls\" (UID: \"7ef0c61d-e6e4-49d2-949c-ed412b59186f\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" Nov 24 14:13:00 crc kubenswrapper[5039]: I1124 14:13:00.159001 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" Nov 24 14:13:00 crc kubenswrapper[5039]: I1124 14:13:00.683078 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls"] Nov 24 14:13:00 crc kubenswrapper[5039]: W1124 14:13:00.689693 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ef0c61d_e6e4_49d2_949c_ed412b59186f.slice/crio-da35409f47e0cd6102f01cecad3f48b3ef29a125c5e578144ee9edce5be6df64 WatchSource:0}: Error finding container da35409f47e0cd6102f01cecad3f48b3ef29a125c5e578144ee9edce5be6df64: Status 404 returned error can't find the container with id da35409f47e0cd6102f01cecad3f48b3ef29a125c5e578144ee9edce5be6df64 Nov 24 14:13:00 crc kubenswrapper[5039]: I1124 14:13:00.724635 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" event={"ID":"7ef0c61d-e6e4-49d2-949c-ed412b59186f","Type":"ContainerStarted","Data":"da35409f47e0cd6102f01cecad3f48b3ef29a125c5e578144ee9edce5be6df64"} Nov 24 14:13:03 crc kubenswrapper[5039]: I1124 14:13:03.374451 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" event={"ID":"7ef0c61d-e6e4-49d2-949c-ed412b59186f","Type":"ContainerStarted","Data":"a851383dae79cabbb9430100e905432a92603c684466db1696e0996525296027"} Nov 24 14:13:03 crc kubenswrapper[5039]: I1124 14:13:03.388884 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" podStartSLOduration=3.750334423 podStartE2EDuration="4.388861822s" podCreationTimestamp="2025-11-24 14:12:59 +0000 UTC" firstStartedPulling="2025-11-24 14:13:00.693638812 +0000 UTC m=+3293.132763312" lastFinishedPulling="2025-11-24 14:13:01.332166191 +0000 UTC m=+3293.771290711" observedRunningTime="2025-11-24 14:13:03.388806961 +0000 UTC m=+3295.827931461" watchObservedRunningTime="2025-11-24 14:13:03.388861822 +0000 UTC m=+3295.827986322" Nov 24 14:13:11 crc kubenswrapper[5039]: I1124 14:13:11.459476 5039 generic.go:334] "Generic (PLEG): container finished" podID="7ef0c61d-e6e4-49d2-949c-ed412b59186f" containerID="a851383dae79cabbb9430100e905432a92603c684466db1696e0996525296027" exitCode=0 Nov 24 14:13:11 crc kubenswrapper[5039]: I1124 14:13:11.459639 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" event={"ID":"7ef0c61d-e6e4-49d2-949c-ed412b59186f","Type":"ContainerDied","Data":"a851383dae79cabbb9430100e905432a92603c684466db1696e0996525296027"} Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.044572 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.152353 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7ef0c61d-e6e4-49d2-949c-ed412b59186f-inventory\") pod \"7ef0c61d-e6e4-49d2-949c-ed412b59186f\" (UID: \"7ef0c61d-e6e4-49d2-949c-ed412b59186f\") " Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.152428 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vqt6f\" (UniqueName: \"kubernetes.io/projected/7ef0c61d-e6e4-49d2-949c-ed412b59186f-kube-api-access-vqt6f\") pod \"7ef0c61d-e6e4-49d2-949c-ed412b59186f\" (UID: \"7ef0c61d-e6e4-49d2-949c-ed412b59186f\") " Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.152484 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7ef0c61d-e6e4-49d2-949c-ed412b59186f-ssh-key\") pod \"7ef0c61d-e6e4-49d2-949c-ed412b59186f\" (UID: \"7ef0c61d-e6e4-49d2-949c-ed412b59186f\") " Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.152814 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7ef0c61d-e6e4-49d2-949c-ed412b59186f-ceph\") pod \"7ef0c61d-e6e4-49d2-949c-ed412b59186f\" (UID: \"7ef0c61d-e6e4-49d2-949c-ed412b59186f\") " Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.158039 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ef0c61d-e6e4-49d2-949c-ed412b59186f-ceph" (OuterVolumeSpecName: "ceph") pod "7ef0c61d-e6e4-49d2-949c-ed412b59186f" (UID: "7ef0c61d-e6e4-49d2-949c-ed412b59186f"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.158119 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ef0c61d-e6e4-49d2-949c-ed412b59186f-kube-api-access-vqt6f" (OuterVolumeSpecName: "kube-api-access-vqt6f") pod "7ef0c61d-e6e4-49d2-949c-ed412b59186f" (UID: "7ef0c61d-e6e4-49d2-949c-ed412b59186f"). InnerVolumeSpecName "kube-api-access-vqt6f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.189766 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ef0c61d-e6e4-49d2-949c-ed412b59186f-inventory" (OuterVolumeSpecName: "inventory") pod "7ef0c61d-e6e4-49d2-949c-ed412b59186f" (UID: "7ef0c61d-e6e4-49d2-949c-ed412b59186f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.193246 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ef0c61d-e6e4-49d2-949c-ed412b59186f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7ef0c61d-e6e4-49d2-949c-ed412b59186f" (UID: "7ef0c61d-e6e4-49d2-949c-ed412b59186f"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.255137 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7ef0c61d-e6e4-49d2-949c-ed412b59186f-ceph\") on node \"crc\" DevicePath \"\"" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.255179 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7ef0c61d-e6e4-49d2-949c-ed412b59186f-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.255196 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vqt6f\" (UniqueName: \"kubernetes.io/projected/7ef0c61d-e6e4-49d2-949c-ed412b59186f-kube-api-access-vqt6f\") on node \"crc\" DevicePath \"\"" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.255207 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7ef0c61d-e6e4-49d2-949c-ed412b59186f-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.481185 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" event={"ID":"7ef0c61d-e6e4-49d2-949c-ed412b59186f","Type":"ContainerDied","Data":"da35409f47e0cd6102f01cecad3f48b3ef29a125c5e578144ee9edce5be6df64"} Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.481229 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-hnwls" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.481236 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da35409f47e0cd6102f01cecad3f48b3ef29a125c5e578144ee9edce5be6df64" Nov 24 14:13:13 crc kubenswrapper[5039]: E1124 14:13:13.579978 5039 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ef0c61d_e6e4_49d2_949c_ed412b59186f.slice\": RecentStats: unable to find data in memory cache]" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.581592 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m"] Nov 24 14:13:13 crc kubenswrapper[5039]: E1124 14:13:13.582890 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ef0c61d-e6e4-49d2-949c-ed412b59186f" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.582927 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ef0c61d-e6e4-49d2-949c-ed412b59186f" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.583669 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ef0c61d-e6e4-49d2-949c-ed412b59186f" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.585328 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.587668 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m"] Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.601076 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.601142 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.601373 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.601495 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.601590 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.664311 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwqb6\" (UniqueName: \"kubernetes.io/projected/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-kube-api-access-bwqb6\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m\" (UID: \"76784bc3-6a6c-4ecc-8799-daffb50a9ca3\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.664688 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m\" (UID: \"76784bc3-6a6c-4ecc-8799-daffb50a9ca3\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.664760 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m\" (UID: \"76784bc3-6a6c-4ecc-8799-daffb50a9ca3\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.664941 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m\" (UID: \"76784bc3-6a6c-4ecc-8799-daffb50a9ca3\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.766620 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m\" (UID: \"76784bc3-6a6c-4ecc-8799-daffb50a9ca3\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.766727 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m\" (UID: \"76784bc3-6a6c-4ecc-8799-daffb50a9ca3\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.766809 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwqb6\" (UniqueName: \"kubernetes.io/projected/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-kube-api-access-bwqb6\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m\" (UID: \"76784bc3-6a6c-4ecc-8799-daffb50a9ca3\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.766841 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m\" (UID: \"76784bc3-6a6c-4ecc-8799-daffb50a9ca3\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.770901 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m\" (UID: \"76784bc3-6a6c-4ecc-8799-daffb50a9ca3\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.777441 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m\" (UID: \"76784bc3-6a6c-4ecc-8799-daffb50a9ca3\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.777996 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m\" (UID: \"76784bc3-6a6c-4ecc-8799-daffb50a9ca3\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.793151 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwqb6\" (UniqueName: \"kubernetes.io/projected/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-kube-api-access-bwqb6\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m\" (UID: \"76784bc3-6a6c-4ecc-8799-daffb50a9ca3\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" Nov 24 14:13:13 crc kubenswrapper[5039]: I1124 14:13:13.935902 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" Nov 24 14:13:14 crc kubenswrapper[5039]: I1124 14:13:14.495558 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m"] Nov 24 14:13:15 crc kubenswrapper[5039]: I1124 14:13:15.507899 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" event={"ID":"76784bc3-6a6c-4ecc-8799-daffb50a9ca3","Type":"ContainerStarted","Data":"3adfe48ab47e64247b57e7769faaac425565ff4d7f5f994f7cc0a7161925eeee"} Nov 24 14:13:15 crc kubenswrapper[5039]: I1124 14:13:15.509370 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" event={"ID":"76784bc3-6a6c-4ecc-8799-daffb50a9ca3","Type":"ContainerStarted","Data":"b2a4b9d7e7cddfbc5fe68fab7e4bcdff54a805334e6e1bc8c100603ac9161afd"} Nov 24 14:13:15 crc kubenswrapper[5039]: I1124 14:13:15.542970 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" podStartSLOduration=2.105921066 podStartE2EDuration="2.542939813s" podCreationTimestamp="2025-11-24 14:13:13 +0000 UTC" firstStartedPulling="2025-11-24 14:13:14.501258146 +0000 UTC m=+3306.940382646" lastFinishedPulling="2025-11-24 14:13:14.938276893 +0000 UTC m=+3307.377401393" observedRunningTime="2025-11-24 14:13:15.52809013 +0000 UTC m=+3307.967214630" watchObservedRunningTime="2025-11-24 14:13:15.542939813 +0000 UTC m=+3307.982064303" Nov 24 14:13:27 crc kubenswrapper[5039]: I1124 14:13:27.639939 5039 generic.go:334] "Generic (PLEG): container finished" podID="76784bc3-6a6c-4ecc-8799-daffb50a9ca3" containerID="3adfe48ab47e64247b57e7769faaac425565ff4d7f5f994f7cc0a7161925eeee" exitCode=0 Nov 24 14:13:27 crc kubenswrapper[5039]: I1124 14:13:27.640034 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" event={"ID":"76784bc3-6a6c-4ecc-8799-daffb50a9ca3","Type":"ContainerDied","Data":"3adfe48ab47e64247b57e7769faaac425565ff4d7f5f994f7cc0a7161925eeee"} Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.164529 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.320874 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-ceph\") pod \"76784bc3-6a6c-4ecc-8799-daffb50a9ca3\" (UID: \"76784bc3-6a6c-4ecc-8799-daffb50a9ca3\") " Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.320957 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-ssh-key\") pod \"76784bc3-6a6c-4ecc-8799-daffb50a9ca3\" (UID: \"76784bc3-6a6c-4ecc-8799-daffb50a9ca3\") " Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.320995 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bwqb6\" (UniqueName: \"kubernetes.io/projected/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-kube-api-access-bwqb6\") pod \"76784bc3-6a6c-4ecc-8799-daffb50a9ca3\" (UID: \"76784bc3-6a6c-4ecc-8799-daffb50a9ca3\") " Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.321083 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-inventory\") pod \"76784bc3-6a6c-4ecc-8799-daffb50a9ca3\" (UID: \"76784bc3-6a6c-4ecc-8799-daffb50a9ca3\") " Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.326741 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-ceph" (OuterVolumeSpecName: "ceph") pod "76784bc3-6a6c-4ecc-8799-daffb50a9ca3" (UID: "76784bc3-6a6c-4ecc-8799-daffb50a9ca3"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.328415 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-kube-api-access-bwqb6" (OuterVolumeSpecName: "kube-api-access-bwqb6") pod "76784bc3-6a6c-4ecc-8799-daffb50a9ca3" (UID: "76784bc3-6a6c-4ecc-8799-daffb50a9ca3"). InnerVolumeSpecName "kube-api-access-bwqb6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.351987 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-inventory" (OuterVolumeSpecName: "inventory") pod "76784bc3-6a6c-4ecc-8799-daffb50a9ca3" (UID: "76784bc3-6a6c-4ecc-8799-daffb50a9ca3"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.378671 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "76784bc3-6a6c-4ecc-8799-daffb50a9ca3" (UID: "76784bc3-6a6c-4ecc-8799-daffb50a9ca3"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.423636 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-ceph\") on node \"crc\" DevicePath \"\"" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.423673 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.423719 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bwqb6\" (UniqueName: \"kubernetes.io/projected/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-kube-api-access-bwqb6\") on node \"crc\" DevicePath \"\"" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.423751 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/76784bc3-6a6c-4ecc-8799-daffb50a9ca3-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.659932 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" event={"ID":"76784bc3-6a6c-4ecc-8799-daffb50a9ca3","Type":"ContainerDied","Data":"b2a4b9d7e7cddfbc5fe68fab7e4bcdff54a805334e6e1bc8c100603ac9161afd"} Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.659981 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2a4b9d7e7cddfbc5fe68fab7e4bcdff54a805334e6e1bc8c100603ac9161afd" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.660043 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.827966 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n"] Nov 24 14:13:29 crc kubenswrapper[5039]: E1124 14:13:29.828351 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76784bc3-6a6c-4ecc-8799-daffb50a9ca3" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.828368 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="76784bc3-6a6c-4ecc-8799-daffb50a9ca3" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.828595 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="76784bc3-6a6c-4ecc-8799-daffb50a9ca3" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.829311 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.832040 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.832282 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.832569 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.832762 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.832918 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.833442 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.834021 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.834487 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.834696 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.834804 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.844393 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n"] Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.935056 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.935107 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.935134 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.935153 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.935304 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.935345 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.935362 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.935383 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.935413 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.935464 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.935527 5039 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.935634 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.935749 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8v45w\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-kube-api-access-8v45w\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.935806 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.935899 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.935925 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:29 crc kubenswrapper[5039]: I1124 14:13:29.935973 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.037604 5039 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.037663 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.037693 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.037721 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.037759 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.037801 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.037836 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.037883 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.037920 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8v45w\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-kube-api-access-8v45w\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.037956 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.038003 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.038025 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.038061 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.038086 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.038120 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.038153 5039 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.038181 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.042160 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.043156 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.043219 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.043270 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.044052 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.044266 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.044637 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.045174 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.046520 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.047377 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.047485 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.049205 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.050669 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.051130 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.051456 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.056553 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.061970 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8v45w\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-kube-api-access-8v45w\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.174027 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:13:30 crc kubenswrapper[5039]: I1124 14:13:30.717360 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n"] Nov 24 14:13:30 crc kubenswrapper[5039]: W1124 14:13:30.725685 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0270ae43_26ba_4706_827e_c008cf7ca4fa.slice/crio-33ac9a38ac197ea220e48c4893c79d764ad40584407aad1f5a10eb3ee159733a WatchSource:0}: Error finding container 33ac9a38ac197ea220e48c4893c79d764ad40584407aad1f5a10eb3ee159733a: Status 404 returned error can't find the container with id 33ac9a38ac197ea220e48c4893c79d764ad40584407aad1f5a10eb3ee159733a Nov 24 14:13:31 crc kubenswrapper[5039]: I1124 14:13:31.680486 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" event={"ID":"0270ae43-26ba-4706-827e-c008cf7ca4fa","Type":"ContainerStarted","Data":"c35b31c6bf3045cb05406f3a8196f871d7424fe9733166db1882d3b10a0b5619"} Nov 24 14:13:31 crc kubenswrapper[5039]: I1124 14:13:31.680859 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" event={"ID":"0270ae43-26ba-4706-827e-c008cf7ca4fa","Type":"ContainerStarted","Data":"33ac9a38ac197ea220e48c4893c79d764ad40584407aad1f5a10eb3ee159733a"} Nov 24 14:13:31 crc kubenswrapper[5039]: I1124 14:13:31.710797 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" podStartSLOduration=2.290757401 podStartE2EDuration="2.710779671s" podCreationTimestamp="2025-11-24 14:13:29 +0000 UTC" firstStartedPulling="2025-11-24 14:13:30.727343229 +0000 UTC m=+3323.166467729" lastFinishedPulling="2025-11-24 14:13:31.147365479 +0000 UTC m=+3323.586489999" observedRunningTime="2025-11-24 14:13:31.701410781 +0000 UTC m=+3324.140535291" watchObservedRunningTime="2025-11-24 14:13:31.710779671 +0000 UTC m=+3324.149904171" Nov 24 14:13:37 crc kubenswrapper[5039]: I1124 14:13:37.661433 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-srnw9"] Nov 24 14:13:37 crc kubenswrapper[5039]: I1124 14:13:37.664082 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-srnw9" Nov 24 14:13:37 crc kubenswrapper[5039]: I1124 14:13:37.674721 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-srnw9"] Nov 24 14:13:37 crc kubenswrapper[5039]: I1124 14:13:37.813904 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13a46435-cce5-4a37-8b41-6e183ecca8f6-catalog-content\") pod \"community-operators-srnw9\" (UID: \"13a46435-cce5-4a37-8b41-6e183ecca8f6\") " pod="openshift-marketplace/community-operators-srnw9" Nov 24 14:13:37 crc kubenswrapper[5039]: I1124 14:13:37.814280 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13a46435-cce5-4a37-8b41-6e183ecca8f6-utilities\") pod \"community-operators-srnw9\" (UID: \"13a46435-cce5-4a37-8b41-6e183ecca8f6\") " pod="openshift-marketplace/community-operators-srnw9" Nov 24 14:13:37 crc kubenswrapper[5039]: I1124 14:13:37.814481 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5t6zk\" (UniqueName: \"kubernetes.io/projected/13a46435-cce5-4a37-8b41-6e183ecca8f6-kube-api-access-5t6zk\") pod \"community-operators-srnw9\" (UID: \"13a46435-cce5-4a37-8b41-6e183ecca8f6\") " pod="openshift-marketplace/community-operators-srnw9" Nov 24 14:13:37 crc kubenswrapper[5039]: I1124 14:13:37.915929 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13a46435-cce5-4a37-8b41-6e183ecca8f6-catalog-content\") pod \"community-operators-srnw9\" (UID: \"13a46435-cce5-4a37-8b41-6e183ecca8f6\") " pod="openshift-marketplace/community-operators-srnw9" Nov 24 14:13:37 crc kubenswrapper[5039]: I1124 14:13:37.916170 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13a46435-cce5-4a37-8b41-6e183ecca8f6-utilities\") pod \"community-operators-srnw9\" (UID: \"13a46435-cce5-4a37-8b41-6e183ecca8f6\") " pod="openshift-marketplace/community-operators-srnw9" Nov 24 14:13:37 crc kubenswrapper[5039]: I1124 14:13:37.916297 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5t6zk\" (UniqueName: \"kubernetes.io/projected/13a46435-cce5-4a37-8b41-6e183ecca8f6-kube-api-access-5t6zk\") pod \"community-operators-srnw9\" (UID: \"13a46435-cce5-4a37-8b41-6e183ecca8f6\") " pod="openshift-marketplace/community-operators-srnw9" Nov 24 14:13:37 crc kubenswrapper[5039]: I1124 14:13:37.917226 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13a46435-cce5-4a37-8b41-6e183ecca8f6-catalog-content\") pod \"community-operators-srnw9\" (UID: \"13a46435-cce5-4a37-8b41-6e183ecca8f6\") " pod="openshift-marketplace/community-operators-srnw9" Nov 24 14:13:37 crc kubenswrapper[5039]: I1124 14:13:37.917614 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13a46435-cce5-4a37-8b41-6e183ecca8f6-utilities\") pod \"community-operators-srnw9\" (UID: \"13a46435-cce5-4a37-8b41-6e183ecca8f6\") " pod="openshift-marketplace/community-operators-srnw9" Nov 24 14:13:37 crc kubenswrapper[5039]: I1124 14:13:37.959230 5039 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-5t6zk\" (UniqueName: \"kubernetes.io/projected/13a46435-cce5-4a37-8b41-6e183ecca8f6-kube-api-access-5t6zk\") pod \"community-operators-srnw9\" (UID: \"13a46435-cce5-4a37-8b41-6e183ecca8f6\") " pod="openshift-marketplace/community-operators-srnw9" Nov 24 14:13:37 crc kubenswrapper[5039]: I1124 14:13:37.998183 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-srnw9" Nov 24 14:13:38 crc kubenswrapper[5039]: I1124 14:13:38.604288 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-srnw9"] Nov 24 14:13:38 crc kubenswrapper[5039]: I1124 14:13:38.745197 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-srnw9" event={"ID":"13a46435-cce5-4a37-8b41-6e183ecca8f6","Type":"ContainerStarted","Data":"704f97b7cae052b9c23eab20eb37efddbc302633cb5c0079bd0c6804fa0f5877"} Nov 24 14:13:39 crc kubenswrapper[5039]: I1124 14:13:39.755202 5039 generic.go:334] "Generic (PLEG): container finished" podID="13a46435-cce5-4a37-8b41-6e183ecca8f6" containerID="ff92ec781b1cf7ee5efdd80949584ccdb055b9b719c7d24762749a620da95dbb" exitCode=0 Nov 24 14:13:39 crc kubenswrapper[5039]: I1124 14:13:39.755523 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-srnw9" event={"ID":"13a46435-cce5-4a37-8b41-6e183ecca8f6","Type":"ContainerDied","Data":"ff92ec781b1cf7ee5efdd80949584ccdb055b9b719c7d24762749a620da95dbb"} Nov 24 14:13:44 crc kubenswrapper[5039]: I1124 14:13:44.809102 5039 generic.go:334] "Generic (PLEG): container finished" podID="13a46435-cce5-4a37-8b41-6e183ecca8f6" containerID="c5e2103b8c4bcbb32445d38d866b14d4529ae57bc3df8aedbd3d942543611da6" exitCode=0 Nov 24 14:13:44 crc kubenswrapper[5039]: I1124 14:13:44.809174 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-srnw9" event={"ID":"13a46435-cce5-4a37-8b41-6e183ecca8f6","Type":"ContainerDied","Data":"c5e2103b8c4bcbb32445d38d866b14d4529ae57bc3df8aedbd3d942543611da6"} Nov 24 14:13:45 crc kubenswrapper[5039]: I1124 14:13:45.822099 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-srnw9" event={"ID":"13a46435-cce5-4a37-8b41-6e183ecca8f6","Type":"ContainerStarted","Data":"fa58d45e9e4c8283ad96fbb7a721046753ccc000c83201e19cfd09f3511f912b"} Nov 24 14:13:45 crc kubenswrapper[5039]: I1124 14:13:45.845141 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-srnw9" podStartSLOduration=3.131442601 podStartE2EDuration="8.845122643s" podCreationTimestamp="2025-11-24 14:13:37 +0000 UTC" firstStartedPulling="2025-11-24 14:13:39.758345608 +0000 UTC m=+3332.197470108" lastFinishedPulling="2025-11-24 14:13:45.47202565 +0000 UTC m=+3337.911150150" observedRunningTime="2025-11-24 14:13:45.83891448 +0000 UTC m=+3338.278038980" watchObservedRunningTime="2025-11-24 14:13:45.845122643 +0000 UTC m=+3338.284247163" Nov 24 14:13:47 crc kubenswrapper[5039]: I1124 14:13:47.998924 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-srnw9" Nov 24 14:13:47 crc kubenswrapper[5039]: I1124 14:13:47.999307 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-srnw9" Nov 24 14:13:48 crc kubenswrapper[5039]: I1124 14:13:48.060260 5039 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-srnw9" Nov 24 14:13:50 crc kubenswrapper[5039]: I1124 14:13:50.101544 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:13:50 crc kubenswrapper[5039]: I1124 14:13:50.101934 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 14:13:58 crc kubenswrapper[5039]: I1124 14:13:58.051401 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-srnw9" Nov 24 14:13:58 crc kubenswrapper[5039]: I1124 14:13:58.129401 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-srnw9"] Nov 24 14:13:58 crc kubenswrapper[5039]: I1124 14:13:58.174739 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-svzch"] Nov 24 14:13:58 crc kubenswrapper[5039]: I1124 14:13:58.175018 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-svzch" podUID="5e1f5668-c1c5-4a87-aec0-32a153351cf1" containerName="registry-server" containerID="cri-o://11dc4558c1f27246986908a78d83161b7d4694ef02946fa76ecdc69c55d119d7" gracePeriod=2 Nov 24 14:13:58 crc kubenswrapper[5039]: I1124 14:13:58.757019 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-svzch" Nov 24 14:13:58 crc kubenswrapper[5039]: I1124 14:13:58.870571 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e1f5668-c1c5-4a87-aec0-32a153351cf1-catalog-content\") pod \"5e1f5668-c1c5-4a87-aec0-32a153351cf1\" (UID: \"5e1f5668-c1c5-4a87-aec0-32a153351cf1\") " Nov 24 14:13:58 crc kubenswrapper[5039]: I1124 14:13:58.870645 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pxvkp\" (UniqueName: \"kubernetes.io/projected/5e1f5668-c1c5-4a87-aec0-32a153351cf1-kube-api-access-pxvkp\") pod \"5e1f5668-c1c5-4a87-aec0-32a153351cf1\" (UID: \"5e1f5668-c1c5-4a87-aec0-32a153351cf1\") " Nov 24 14:13:58 crc kubenswrapper[5039]: I1124 14:13:58.870879 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e1f5668-c1c5-4a87-aec0-32a153351cf1-utilities\") pod \"5e1f5668-c1c5-4a87-aec0-32a153351cf1\" (UID: \"5e1f5668-c1c5-4a87-aec0-32a153351cf1\") " Nov 24 14:13:58 crc kubenswrapper[5039]: I1124 14:13:58.871308 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e1f5668-c1c5-4a87-aec0-32a153351cf1-utilities" (OuterVolumeSpecName: "utilities") pod "5e1f5668-c1c5-4a87-aec0-32a153351cf1" (UID: "5e1f5668-c1c5-4a87-aec0-32a153351cf1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:13:58 crc kubenswrapper[5039]: I1124 14:13:58.873432 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e1f5668-c1c5-4a87-aec0-32a153351cf1-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 14:13:58 crc kubenswrapper[5039]: I1124 14:13:58.875807 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e1f5668-c1c5-4a87-aec0-32a153351cf1-kube-api-access-pxvkp" (OuterVolumeSpecName: "kube-api-access-pxvkp") pod "5e1f5668-c1c5-4a87-aec0-32a153351cf1" (UID: "5e1f5668-c1c5-4a87-aec0-32a153351cf1"). InnerVolumeSpecName "kube-api-access-pxvkp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:13:58 crc kubenswrapper[5039]: I1124 14:13:58.937767 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e1f5668-c1c5-4a87-aec0-32a153351cf1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5e1f5668-c1c5-4a87-aec0-32a153351cf1" (UID: "5e1f5668-c1c5-4a87-aec0-32a153351cf1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:13:58 crc kubenswrapper[5039]: I1124 14:13:58.961077 5039 generic.go:334] "Generic (PLEG): container finished" podID="5e1f5668-c1c5-4a87-aec0-32a153351cf1" containerID="11dc4558c1f27246986908a78d83161b7d4694ef02946fa76ecdc69c55d119d7" exitCode=0 Nov 24 14:13:58 crc kubenswrapper[5039]: I1124 14:13:58.961164 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-svzch" Nov 24 14:13:58 crc kubenswrapper[5039]: I1124 14:13:58.961173 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-svzch" event={"ID":"5e1f5668-c1c5-4a87-aec0-32a153351cf1","Type":"ContainerDied","Data":"11dc4558c1f27246986908a78d83161b7d4694ef02946fa76ecdc69c55d119d7"} Nov 24 14:13:58 crc kubenswrapper[5039]: I1124 14:13:58.961232 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-svzch" event={"ID":"5e1f5668-c1c5-4a87-aec0-32a153351cf1","Type":"ContainerDied","Data":"90ba8204dc56216170aa4cf804edf1cbf6d746afa32700fbbfad7a7ae870cc47"} Nov 24 14:13:58 crc kubenswrapper[5039]: I1124 14:13:58.961251 5039 scope.go:117] "RemoveContainer" containerID="11dc4558c1f27246986908a78d83161b7d4694ef02946fa76ecdc69c55d119d7" Nov 24 14:13:58 crc kubenswrapper[5039]: I1124 14:13:58.975377 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e1f5668-c1c5-4a87-aec0-32a153351cf1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 14:13:58 crc kubenswrapper[5039]: I1124 14:13:58.975407 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pxvkp\" (UniqueName: \"kubernetes.io/projected/5e1f5668-c1c5-4a87-aec0-32a153351cf1-kube-api-access-pxvkp\") on node \"crc\" DevicePath \"\"" Nov 24 14:13:58 crc kubenswrapper[5039]: I1124 14:13:58.997668 5039 scope.go:117] "RemoveContainer" containerID="01c4d23b218dcada6adaeab36c7af12834f9b679fdfb074590db7721323c4ff7" Nov 24 14:13:59 crc kubenswrapper[5039]: I1124 14:13:59.039950 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-svzch"] Nov 24 14:13:59 crc kubenswrapper[5039]: I1124 14:13:59.062796 5039 scope.go:117] "RemoveContainer" 
containerID="42c5c8393e93b491942c99da6b86f72f59b3b63c791e6234b59dea7f243a989d" Nov 24 14:13:59 crc kubenswrapper[5039]: I1124 14:13:59.095793 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-svzch"] Nov 24 14:13:59 crc kubenswrapper[5039]: I1124 14:13:59.110910 5039 scope.go:117] "RemoveContainer" containerID="11dc4558c1f27246986908a78d83161b7d4694ef02946fa76ecdc69c55d119d7" Nov 24 14:13:59 crc kubenswrapper[5039]: E1124 14:13:59.117761 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11dc4558c1f27246986908a78d83161b7d4694ef02946fa76ecdc69c55d119d7\": container with ID starting with 11dc4558c1f27246986908a78d83161b7d4694ef02946fa76ecdc69c55d119d7 not found: ID does not exist" containerID="11dc4558c1f27246986908a78d83161b7d4694ef02946fa76ecdc69c55d119d7" Nov 24 14:13:59 crc kubenswrapper[5039]: I1124 14:13:59.117876 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11dc4558c1f27246986908a78d83161b7d4694ef02946fa76ecdc69c55d119d7"} err="failed to get container status \"11dc4558c1f27246986908a78d83161b7d4694ef02946fa76ecdc69c55d119d7\": rpc error: code = NotFound desc = could not find container \"11dc4558c1f27246986908a78d83161b7d4694ef02946fa76ecdc69c55d119d7\": container with ID starting with 11dc4558c1f27246986908a78d83161b7d4694ef02946fa76ecdc69c55d119d7 not found: ID does not exist" Nov 24 14:13:59 crc kubenswrapper[5039]: I1124 14:13:59.117919 5039 scope.go:117] "RemoveContainer" containerID="01c4d23b218dcada6adaeab36c7af12834f9b679fdfb074590db7721323c4ff7" Nov 24 14:13:59 crc kubenswrapper[5039]: E1124 14:13:59.122642 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01c4d23b218dcada6adaeab36c7af12834f9b679fdfb074590db7721323c4ff7\": container with ID starting with 01c4d23b218dcada6adaeab36c7af12834f9b679fdfb074590db7721323c4ff7 not found: ID does not exist" containerID="01c4d23b218dcada6adaeab36c7af12834f9b679fdfb074590db7721323c4ff7" Nov 24 14:13:59 crc kubenswrapper[5039]: I1124 14:13:59.122710 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01c4d23b218dcada6adaeab36c7af12834f9b679fdfb074590db7721323c4ff7"} err="failed to get container status \"01c4d23b218dcada6adaeab36c7af12834f9b679fdfb074590db7721323c4ff7\": rpc error: code = NotFound desc = could not find container \"01c4d23b218dcada6adaeab36c7af12834f9b679fdfb074590db7721323c4ff7\": container with ID starting with 01c4d23b218dcada6adaeab36c7af12834f9b679fdfb074590db7721323c4ff7 not found: ID does not exist" Nov 24 14:13:59 crc kubenswrapper[5039]: I1124 14:13:59.122751 5039 scope.go:117] "RemoveContainer" containerID="42c5c8393e93b491942c99da6b86f72f59b3b63c791e6234b59dea7f243a989d" Nov 24 14:13:59 crc kubenswrapper[5039]: E1124 14:13:59.123175 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42c5c8393e93b491942c99da6b86f72f59b3b63c791e6234b59dea7f243a989d\": container with ID starting with 42c5c8393e93b491942c99da6b86f72f59b3b63c791e6234b59dea7f243a989d not found: ID does not exist" containerID="42c5c8393e93b491942c99da6b86f72f59b3b63c791e6234b59dea7f243a989d" Nov 24 14:13:59 crc kubenswrapper[5039]: I1124 14:13:59.123198 5039 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"42c5c8393e93b491942c99da6b86f72f59b3b63c791e6234b59dea7f243a989d"} err="failed to get container status \"42c5c8393e93b491942c99da6b86f72f59b3b63c791e6234b59dea7f243a989d\": rpc error: code = NotFound desc = could not find container \"42c5c8393e93b491942c99da6b86f72f59b3b63c791e6234b59dea7f243a989d\": container with ID starting with 42c5c8393e93b491942c99da6b86f72f59b3b63c791e6234b59dea7f243a989d not found: ID does not exist" Nov 24 14:14:00 crc kubenswrapper[5039]: I1124 14:14:00.321955 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e1f5668-c1c5-4a87-aec0-32a153351cf1" path="/var/lib/kubelet/pods/5e1f5668-c1c5-4a87-aec0-32a153351cf1/volumes" Nov 24 14:14:20 crc kubenswrapper[5039]: I1124 14:14:20.101560 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:14:20 crc kubenswrapper[5039]: I1124 14:14:20.102359 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 14:14:23 crc kubenswrapper[5039]: I1124 14:14:23.903578 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-p8gvp"] Nov 24 14:14:23 crc kubenswrapper[5039]: E1124 14:14:23.904687 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e1f5668-c1c5-4a87-aec0-32a153351cf1" containerName="extract-utilities" Nov 24 14:14:23 crc kubenswrapper[5039]: I1124 14:14:23.904703 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e1f5668-c1c5-4a87-aec0-32a153351cf1" containerName="extract-utilities" Nov 24 14:14:23 crc kubenswrapper[5039]: E1124 14:14:23.904726 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e1f5668-c1c5-4a87-aec0-32a153351cf1" containerName="extract-content" Nov 24 14:14:23 crc kubenswrapper[5039]: I1124 14:14:23.904734 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e1f5668-c1c5-4a87-aec0-32a153351cf1" containerName="extract-content" Nov 24 14:14:23 crc kubenswrapper[5039]: E1124 14:14:23.904770 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e1f5668-c1c5-4a87-aec0-32a153351cf1" containerName="registry-server" Nov 24 14:14:23 crc kubenswrapper[5039]: I1124 14:14:23.904779 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e1f5668-c1c5-4a87-aec0-32a153351cf1" containerName="registry-server" Nov 24 14:14:23 crc kubenswrapper[5039]: I1124 14:14:23.905095 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e1f5668-c1c5-4a87-aec0-32a153351cf1" containerName="registry-server" Nov 24 14:14:23 crc kubenswrapper[5039]: I1124 14:14:23.907098 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-p8gvp" Nov 24 14:14:23 crc kubenswrapper[5039]: I1124 14:14:23.928047 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p8gvp"] Nov 24 14:14:24 crc kubenswrapper[5039]: I1124 14:14:24.069005 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c962a656-154e-4824-8a62-2342b585931f-utilities\") pod \"certified-operators-p8gvp\" (UID: \"c962a656-154e-4824-8a62-2342b585931f\") " pod="openshift-marketplace/certified-operators-p8gvp" Nov 24 14:14:24 crc kubenswrapper[5039]: I1124 14:14:24.069107 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctg7c\" (UniqueName: \"kubernetes.io/projected/c962a656-154e-4824-8a62-2342b585931f-kube-api-access-ctg7c\") pod \"certified-operators-p8gvp\" (UID: \"c962a656-154e-4824-8a62-2342b585931f\") " pod="openshift-marketplace/certified-operators-p8gvp" Nov 24 14:14:24 crc kubenswrapper[5039]: I1124 14:14:24.069255 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c962a656-154e-4824-8a62-2342b585931f-catalog-content\") pod \"certified-operators-p8gvp\" (UID: \"c962a656-154e-4824-8a62-2342b585931f\") " pod="openshift-marketplace/certified-operators-p8gvp" Nov 24 14:14:24 crc kubenswrapper[5039]: I1124 14:14:24.171834 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctg7c\" (UniqueName: \"kubernetes.io/projected/c962a656-154e-4824-8a62-2342b585931f-kube-api-access-ctg7c\") pod \"certified-operators-p8gvp\" (UID: \"c962a656-154e-4824-8a62-2342b585931f\") " pod="openshift-marketplace/certified-operators-p8gvp" Nov 24 14:14:24 crc kubenswrapper[5039]: I1124 14:14:24.171895 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c962a656-154e-4824-8a62-2342b585931f-catalog-content\") pod \"certified-operators-p8gvp\" (UID: \"c962a656-154e-4824-8a62-2342b585931f\") " pod="openshift-marketplace/certified-operators-p8gvp" Nov 24 14:14:24 crc kubenswrapper[5039]: I1124 14:14:24.172021 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c962a656-154e-4824-8a62-2342b585931f-utilities\") pod \"certified-operators-p8gvp\" (UID: \"c962a656-154e-4824-8a62-2342b585931f\") " pod="openshift-marketplace/certified-operators-p8gvp" Nov 24 14:14:24 crc kubenswrapper[5039]: I1124 14:14:24.172448 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c962a656-154e-4824-8a62-2342b585931f-utilities\") pod \"certified-operators-p8gvp\" (UID: \"c962a656-154e-4824-8a62-2342b585931f\") " pod="openshift-marketplace/certified-operators-p8gvp" Nov 24 14:14:24 crc kubenswrapper[5039]: I1124 14:14:24.172853 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c962a656-154e-4824-8a62-2342b585931f-catalog-content\") pod \"certified-operators-p8gvp\" (UID: \"c962a656-154e-4824-8a62-2342b585931f\") " pod="openshift-marketplace/certified-operators-p8gvp" Nov 24 14:14:24 crc kubenswrapper[5039]: I1124 14:14:24.205315 5039 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-ctg7c\" (UniqueName: \"kubernetes.io/projected/c962a656-154e-4824-8a62-2342b585931f-kube-api-access-ctg7c\") pod \"certified-operators-p8gvp\" (UID: \"c962a656-154e-4824-8a62-2342b585931f\") " pod="openshift-marketplace/certified-operators-p8gvp" Nov 24 14:14:24 crc kubenswrapper[5039]: I1124 14:14:24.249410 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p8gvp" Nov 24 14:14:24 crc kubenswrapper[5039]: I1124 14:14:24.754221 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p8gvp"] Nov 24 14:14:25 crc kubenswrapper[5039]: I1124 14:14:25.225538 5039 generic.go:334] "Generic (PLEG): container finished" podID="c962a656-154e-4824-8a62-2342b585931f" containerID="5b3ed471a4d727be1f677ba198b86003829db5a52dc710e7c4d7b5e5319f5b2f" exitCode=0 Nov 24 14:14:25 crc kubenswrapper[5039]: I1124 14:14:25.225591 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p8gvp" event={"ID":"c962a656-154e-4824-8a62-2342b585931f","Type":"ContainerDied","Data":"5b3ed471a4d727be1f677ba198b86003829db5a52dc710e7c4d7b5e5319f5b2f"} Nov 24 14:14:25 crc kubenswrapper[5039]: I1124 14:14:25.225624 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p8gvp" event={"ID":"c962a656-154e-4824-8a62-2342b585931f","Type":"ContainerStarted","Data":"a0348e01a88ee87dc04e8370d4fef65305600b8b9f694501a0b2c3679a31ed13"} Nov 24 14:14:26 crc kubenswrapper[5039]: I1124 14:14:26.240619 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p8gvp" event={"ID":"c962a656-154e-4824-8a62-2342b585931f","Type":"ContainerStarted","Data":"92266b1dc4b32ec5c46a60b29e813cf97dd4f9d02b8c79ade3e64da763f837b2"} Nov 24 14:14:28 crc kubenswrapper[5039]: I1124 14:14:28.262650 5039 generic.go:334] "Generic (PLEG): container finished" podID="c962a656-154e-4824-8a62-2342b585931f" containerID="92266b1dc4b32ec5c46a60b29e813cf97dd4f9d02b8c79ade3e64da763f837b2" exitCode=0 Nov 24 14:14:28 crc kubenswrapper[5039]: I1124 14:14:28.263014 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p8gvp" event={"ID":"c962a656-154e-4824-8a62-2342b585931f","Type":"ContainerDied","Data":"92266b1dc4b32ec5c46a60b29e813cf97dd4f9d02b8c79ade3e64da763f837b2"} Nov 24 14:14:29 crc kubenswrapper[5039]: I1124 14:14:29.275849 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p8gvp" event={"ID":"c962a656-154e-4824-8a62-2342b585931f","Type":"ContainerStarted","Data":"7a3eb9c7a439f595979d35ec68c7065a953106b3e7c19308c80ef77aab02d161"} Nov 24 14:14:30 crc kubenswrapper[5039]: I1124 14:14:30.286418 5039 generic.go:334] "Generic (PLEG): container finished" podID="0270ae43-26ba-4706-827e-c008cf7ca4fa" containerID="c35b31c6bf3045cb05406f3a8196f871d7424fe9733166db1882d3b10a0b5619" exitCode=0 Nov 24 14:14:30 crc kubenswrapper[5039]: I1124 14:14:30.286539 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" event={"ID":"0270ae43-26ba-4706-827e-c008cf7ca4fa","Type":"ContainerDied","Data":"c35b31c6bf3045cb05406f3a8196f871d7424fe9733166db1882d3b10a0b5619"} Nov 24 14:14:30 crc kubenswrapper[5039]: I1124 14:14:30.312212 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/certified-operators-p8gvp" podStartSLOduration=3.872148444 podStartE2EDuration="7.31218814s" podCreationTimestamp="2025-11-24 14:14:23 +0000 UTC" firstStartedPulling="2025-11-24 14:14:25.228275751 +0000 UTC m=+3377.667400271" lastFinishedPulling="2025-11-24 14:14:28.668315467 +0000 UTC m=+3381.107439967" observedRunningTime="2025-11-24 14:14:29.295893599 +0000 UTC m=+3381.735018099" watchObservedRunningTime="2025-11-24 14:14:30.31218814 +0000 UTC m=+3382.751312650" Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.798380 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.951880 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-bootstrap-combined-ca-bundle\") pod \"0270ae43-26ba-4706-827e-c008cf7ca4fa\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.951983 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-ceph\") pod \"0270ae43-26ba-4706-827e-c008cf7ca4fa\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.952051 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"0270ae43-26ba-4706-827e-c008cf7ca4fa\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.952096 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-nova-combined-ca-bundle\") pod \"0270ae43-26ba-4706-827e-c008cf7ca4fa\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.952146 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"0270ae43-26ba-4706-827e-c008cf7ca4fa\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.952251 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8v45w\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-kube-api-access-8v45w\") pod \"0270ae43-26ba-4706-827e-c008cf7ca4fa\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.952307 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"0270ae43-26ba-4706-827e-c008cf7ca4fa\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.952354 5039 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-ovn-default-certs-0\") pod \"0270ae43-26ba-4706-827e-c008cf7ca4fa\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.952394 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"0270ae43-26ba-4706-827e-c008cf7ca4fa\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.952432 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-inventory\") pod \"0270ae43-26ba-4706-827e-c008cf7ca4fa\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.952498 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-repo-setup-combined-ca-bundle\") pod \"0270ae43-26ba-4706-827e-c008cf7ca4fa\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.952564 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-telemetry-combined-ca-bundle\") pod \"0270ae43-26ba-4706-827e-c008cf7ca4fa\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.952620 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-ovn-combined-ca-bundle\") pod \"0270ae43-26ba-4706-827e-c008cf7ca4fa\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.952657 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-neutron-metadata-combined-ca-bundle\") pod \"0270ae43-26ba-4706-827e-c008cf7ca4fa\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.952695 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-ssh-key\") pod \"0270ae43-26ba-4706-827e-c008cf7ca4fa\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.952998 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-libvirt-combined-ca-bundle\") pod \"0270ae43-26ba-4706-827e-c008cf7ca4fa\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.953373 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-telemetry-power-monitoring-combined-ca-bundle\") pod \"0270ae43-26ba-4706-827e-c008cf7ca4fa\" (UID: \"0270ae43-26ba-4706-827e-c008cf7ca4fa\") " Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.958353 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "0270ae43-26ba-4706-827e-c008cf7ca4fa" (UID: "0270ae43-26ba-4706-827e-c008cf7ca4fa"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.958448 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-kube-api-access-8v45w" (OuterVolumeSpecName: "kube-api-access-8v45w") pod "0270ae43-26ba-4706-827e-c008cf7ca4fa" (UID: "0270ae43-26ba-4706-827e-c008cf7ca4fa"). InnerVolumeSpecName "kube-api-access-8v45w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.959986 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "0270ae43-26ba-4706-827e-c008cf7ca4fa" (UID: "0270ae43-26ba-4706-827e-c008cf7ca4fa"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.960070 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-ceph" (OuterVolumeSpecName: "ceph") pod "0270ae43-26ba-4706-827e-c008cf7ca4fa" (UID: "0270ae43-26ba-4706-827e-c008cf7ca4fa"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.960089 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "0270ae43-26ba-4706-827e-c008cf7ca4fa" (UID: "0270ae43-26ba-4706-827e-c008cf7ca4fa"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.960820 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "0270ae43-26ba-4706-827e-c008cf7ca4fa" (UID: "0270ae43-26ba-4706-827e-c008cf7ca4fa"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.961549 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "0270ae43-26ba-4706-827e-c008cf7ca4fa" (UID: "0270ae43-26ba-4706-827e-c008cf7ca4fa"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.961630 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "0270ae43-26ba-4706-827e-c008cf7ca4fa" (UID: "0270ae43-26ba-4706-827e-c008cf7ca4fa"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.961998 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "0270ae43-26ba-4706-827e-c008cf7ca4fa" (UID: "0270ae43-26ba-4706-827e-c008cf7ca4fa"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.962904 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "0270ae43-26ba-4706-827e-c008cf7ca4fa" (UID: "0270ae43-26ba-4706-827e-c008cf7ca4fa"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.963761 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0") pod "0270ae43-26ba-4706-827e-c008cf7ca4fa" (UID: "0270ae43-26ba-4706-827e-c008cf7ca4fa"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.963788 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "0270ae43-26ba-4706-827e-c008cf7ca4fa" (UID: "0270ae43-26ba-4706-827e-c008cf7ca4fa"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.964167 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "0270ae43-26ba-4706-827e-c008cf7ca4fa" (UID: "0270ae43-26ba-4706-827e-c008cf7ca4fa"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.964906 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "0270ae43-26ba-4706-827e-c008cf7ca4fa" (UID: "0270ae43-26ba-4706-827e-c008cf7ca4fa"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.973820 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "0270ae43-26ba-4706-827e-c008cf7ca4fa" (UID: "0270ae43-26ba-4706-827e-c008cf7ca4fa"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.988946 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-inventory" (OuterVolumeSpecName: "inventory") pod "0270ae43-26ba-4706-827e-c008cf7ca4fa" (UID: "0270ae43-26ba-4706-827e-c008cf7ca4fa"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:14:31 crc kubenswrapper[5039]: I1124 14:14:31.995650 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0270ae43-26ba-4706-827e-c008cf7ca4fa" (UID: "0270ae43-26ba-4706-827e-c008cf7ca4fa"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.055948 5039 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.055994 5039 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.056015 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.056027 5039 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.056039 5039 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.056052 5039 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.056063 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-ceph\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.056072 5039 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.056082 5039 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.056095 5039 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.056110 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8v45w\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-kube-api-access-8v45w\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.056121 5039 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.056134 5039 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.056146 5039 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/0270ae43-26ba-4706-827e-c008cf7ca4fa-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.056157 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.056167 5039 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.056179 5039 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0270ae43-26ba-4706-827e-c008cf7ca4fa-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.338957 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.358042 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n" event={"ID":"0270ae43-26ba-4706-827e-c008cf7ca4fa","Type":"ContainerDied","Data":"33ac9a38ac197ea220e48c4893c79d764ad40584407aad1f5a10eb3ee159733a"} Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.358091 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="33ac9a38ac197ea220e48c4893c79d764ad40584407aad1f5a10eb3ee159733a" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.416127 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g"] Nov 24 14:14:32 crc kubenswrapper[5039]: E1124 14:14:32.416639 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0270ae43-26ba-4706-827e-c008cf7ca4fa" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.416661 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0270ae43-26ba-4706-827e-c008cf7ca4fa" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.416938 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0270ae43-26ba-4706-827e-c008cf7ca4fa" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.417873 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.419950 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.420975 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.421078 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.422974 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.430605 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.454400 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g"] Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.565853 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pwjzq\" (UniqueName: \"kubernetes.io/projected/c4761a4e-a177-4629-812b-8f940a7c5b98-kube-api-access-pwjzq\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g\" (UID: \"c4761a4e-a177-4629-812b-8f940a7c5b98\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.566305 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c4761a4e-a177-4629-812b-8f940a7c5b98-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g\" (UID: \"c4761a4e-a177-4629-812b-8f940a7c5b98\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.566385 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c4761a4e-a177-4629-812b-8f940a7c5b98-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g\" (UID: \"c4761a4e-a177-4629-812b-8f940a7c5b98\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.566436 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c4761a4e-a177-4629-812b-8f940a7c5b98-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g\" (UID: \"c4761a4e-a177-4629-812b-8f940a7c5b98\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.668784 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pwjzq\" (UniqueName: \"kubernetes.io/projected/c4761a4e-a177-4629-812b-8f940a7c5b98-kube-api-access-pwjzq\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g\" (UID: \"c4761a4e-a177-4629-812b-8f940a7c5b98\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.668986 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ceph\" (UniqueName: \"kubernetes.io/secret/c4761a4e-a177-4629-812b-8f940a7c5b98-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g\" (UID: \"c4761a4e-a177-4629-812b-8f940a7c5b98\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.669134 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c4761a4e-a177-4629-812b-8f940a7c5b98-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g\" (UID: \"c4761a4e-a177-4629-812b-8f940a7c5b98\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.669272 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c4761a4e-a177-4629-812b-8f940a7c5b98-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g\" (UID: \"c4761a4e-a177-4629-812b-8f940a7c5b98\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.674326 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c4761a4e-a177-4629-812b-8f940a7c5b98-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g\" (UID: \"c4761a4e-a177-4629-812b-8f940a7c5b98\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.674830 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c4761a4e-a177-4629-812b-8f940a7c5b98-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g\" (UID: \"c4761a4e-a177-4629-812b-8f940a7c5b98\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.684808 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c4761a4e-a177-4629-812b-8f940a7c5b98-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g\" (UID: \"c4761a4e-a177-4629-812b-8f940a7c5b98\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.686064 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pwjzq\" (UniqueName: \"kubernetes.io/projected/c4761a4e-a177-4629-812b-8f940a7c5b98-kube-api-access-pwjzq\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g\" (UID: \"c4761a4e-a177-4629-812b-8f940a7c5b98\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" Nov 24 14:14:32 crc kubenswrapper[5039]: I1124 14:14:32.734611 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" Nov 24 14:14:33 crc kubenswrapper[5039]: I1124 14:14:33.251526 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g"] Nov 24 14:14:33 crc kubenswrapper[5039]: W1124 14:14:33.258408 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc4761a4e_a177_4629_812b_8f940a7c5b98.slice/crio-f6cb1ef69e5af1d13b683954a1c254cc1e5ce4a7316aa706143b9f14edb77c14 WatchSource:0}: Error finding container f6cb1ef69e5af1d13b683954a1c254cc1e5ce4a7316aa706143b9f14edb77c14: Status 404 returned error can't find the container with id f6cb1ef69e5af1d13b683954a1c254cc1e5ce4a7316aa706143b9f14edb77c14 Nov 24 14:14:33 crc kubenswrapper[5039]: I1124 14:14:33.261652 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 14:14:33 crc kubenswrapper[5039]: I1124 14:14:33.348218 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" event={"ID":"c4761a4e-a177-4629-812b-8f940a7c5b98","Type":"ContainerStarted","Data":"f6cb1ef69e5af1d13b683954a1c254cc1e5ce4a7316aa706143b9f14edb77c14"} Nov 24 14:14:34 crc kubenswrapper[5039]: I1124 14:14:34.250142 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-p8gvp" Nov 24 14:14:34 crc kubenswrapper[5039]: I1124 14:14:34.250698 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-p8gvp" Nov 24 14:14:34 crc kubenswrapper[5039]: I1124 14:14:34.321978 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-p8gvp" Nov 24 14:14:34 crc kubenswrapper[5039]: I1124 14:14:34.364183 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" event={"ID":"c4761a4e-a177-4629-812b-8f940a7c5b98","Type":"ContainerStarted","Data":"97154af0e6c9d2fca3a5996a1461cfa46078d262f973e2f9053c1828f86e9b87"} Nov 24 14:14:34 crc kubenswrapper[5039]: I1124 14:14:34.385182 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" podStartSLOduration=2.000788394 podStartE2EDuration="2.385159718s" podCreationTimestamp="2025-11-24 14:14:32 +0000 UTC" firstStartedPulling="2025-11-24 14:14:33.261368244 +0000 UTC m=+3385.700492744" lastFinishedPulling="2025-11-24 14:14:33.645739568 +0000 UTC m=+3386.084864068" observedRunningTime="2025-11-24 14:14:34.381004958 +0000 UTC m=+3386.820129468" watchObservedRunningTime="2025-11-24 14:14:34.385159718 +0000 UTC m=+3386.824284218" Nov 24 14:14:34 crc kubenswrapper[5039]: I1124 14:14:34.408441 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-p8gvp" Nov 24 14:14:34 crc kubenswrapper[5039]: I1124 14:14:34.559029 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-p8gvp"] Nov 24 14:14:36 crc kubenswrapper[5039]: I1124 14:14:36.382394 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-p8gvp" podUID="c962a656-154e-4824-8a62-2342b585931f" containerName="registry-server" 
containerID="cri-o://7a3eb9c7a439f595979d35ec68c7065a953106b3e7c19308c80ef77aab02d161" gracePeriod=2 Nov 24 14:14:36 crc kubenswrapper[5039]: I1124 14:14:36.957637 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p8gvp" Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.053802 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c962a656-154e-4824-8a62-2342b585931f-utilities\") pod \"c962a656-154e-4824-8a62-2342b585931f\" (UID: \"c962a656-154e-4824-8a62-2342b585931f\") " Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.053915 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctg7c\" (UniqueName: \"kubernetes.io/projected/c962a656-154e-4824-8a62-2342b585931f-kube-api-access-ctg7c\") pod \"c962a656-154e-4824-8a62-2342b585931f\" (UID: \"c962a656-154e-4824-8a62-2342b585931f\") " Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.053982 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c962a656-154e-4824-8a62-2342b585931f-catalog-content\") pod \"c962a656-154e-4824-8a62-2342b585931f\" (UID: \"c962a656-154e-4824-8a62-2342b585931f\") " Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.054890 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c962a656-154e-4824-8a62-2342b585931f-utilities" (OuterVolumeSpecName: "utilities") pod "c962a656-154e-4824-8a62-2342b585931f" (UID: "c962a656-154e-4824-8a62-2342b585931f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.059449 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c962a656-154e-4824-8a62-2342b585931f-kube-api-access-ctg7c" (OuterVolumeSpecName: "kube-api-access-ctg7c") pod "c962a656-154e-4824-8a62-2342b585931f" (UID: "c962a656-154e-4824-8a62-2342b585931f"). InnerVolumeSpecName "kube-api-access-ctg7c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.100811 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c962a656-154e-4824-8a62-2342b585931f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c962a656-154e-4824-8a62-2342b585931f" (UID: "c962a656-154e-4824-8a62-2342b585931f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.156547 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c962a656-154e-4824-8a62-2342b585931f-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.156593 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctg7c\" (UniqueName: \"kubernetes.io/projected/c962a656-154e-4824-8a62-2342b585931f-kube-api-access-ctg7c\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.156606 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c962a656-154e-4824-8a62-2342b585931f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.392804 5039 generic.go:334] "Generic (PLEG): container finished" podID="c962a656-154e-4824-8a62-2342b585931f" containerID="7a3eb9c7a439f595979d35ec68c7065a953106b3e7c19308c80ef77aab02d161" exitCode=0 Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.392850 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p8gvp" event={"ID":"c962a656-154e-4824-8a62-2342b585931f","Type":"ContainerDied","Data":"7a3eb9c7a439f595979d35ec68c7065a953106b3e7c19308c80ef77aab02d161"} Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.392891 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p8gvp" Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.392919 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p8gvp" event={"ID":"c962a656-154e-4824-8a62-2342b585931f","Type":"ContainerDied","Data":"a0348e01a88ee87dc04e8370d4fef65305600b8b9f694501a0b2c3679a31ed13"} Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.392943 5039 scope.go:117] "RemoveContainer" containerID="7a3eb9c7a439f595979d35ec68c7065a953106b3e7c19308c80ef77aab02d161" Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.414800 5039 scope.go:117] "RemoveContainer" containerID="92266b1dc4b32ec5c46a60b29e813cf97dd4f9d02b8c79ade3e64da763f837b2" Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.439582 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-p8gvp"] Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.445602 5039 scope.go:117] "RemoveContainer" containerID="5b3ed471a4d727be1f677ba198b86003829db5a52dc710e7c4d7b5e5319f5b2f" Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.447837 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-p8gvp"] Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.499368 5039 scope.go:117] "RemoveContainer" containerID="7a3eb9c7a439f595979d35ec68c7065a953106b3e7c19308c80ef77aab02d161" Nov 24 14:14:37 crc kubenswrapper[5039]: E1124 14:14:37.499862 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a3eb9c7a439f595979d35ec68c7065a953106b3e7c19308c80ef77aab02d161\": container with ID starting with 7a3eb9c7a439f595979d35ec68c7065a953106b3e7c19308c80ef77aab02d161 not found: ID does not exist" containerID="7a3eb9c7a439f595979d35ec68c7065a953106b3e7c19308c80ef77aab02d161" Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.499891 
5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a3eb9c7a439f595979d35ec68c7065a953106b3e7c19308c80ef77aab02d161"} err="failed to get container status \"7a3eb9c7a439f595979d35ec68c7065a953106b3e7c19308c80ef77aab02d161\": rpc error: code = NotFound desc = could not find container \"7a3eb9c7a439f595979d35ec68c7065a953106b3e7c19308c80ef77aab02d161\": container with ID starting with 7a3eb9c7a439f595979d35ec68c7065a953106b3e7c19308c80ef77aab02d161 not found: ID does not exist" Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.499924 5039 scope.go:117] "RemoveContainer" containerID="92266b1dc4b32ec5c46a60b29e813cf97dd4f9d02b8c79ade3e64da763f837b2" Nov 24 14:14:37 crc kubenswrapper[5039]: E1124 14:14:37.500246 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92266b1dc4b32ec5c46a60b29e813cf97dd4f9d02b8c79ade3e64da763f837b2\": container with ID starting with 92266b1dc4b32ec5c46a60b29e813cf97dd4f9d02b8c79ade3e64da763f837b2 not found: ID does not exist" containerID="92266b1dc4b32ec5c46a60b29e813cf97dd4f9d02b8c79ade3e64da763f837b2" Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.500271 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92266b1dc4b32ec5c46a60b29e813cf97dd4f9d02b8c79ade3e64da763f837b2"} err="failed to get container status \"92266b1dc4b32ec5c46a60b29e813cf97dd4f9d02b8c79ade3e64da763f837b2\": rpc error: code = NotFound desc = could not find container \"92266b1dc4b32ec5c46a60b29e813cf97dd4f9d02b8c79ade3e64da763f837b2\": container with ID starting with 92266b1dc4b32ec5c46a60b29e813cf97dd4f9d02b8c79ade3e64da763f837b2 not found: ID does not exist" Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.500286 5039 scope.go:117] "RemoveContainer" containerID="5b3ed471a4d727be1f677ba198b86003829db5a52dc710e7c4d7b5e5319f5b2f" Nov 24 14:14:37 crc kubenswrapper[5039]: E1124 14:14:37.500816 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b3ed471a4d727be1f677ba198b86003829db5a52dc710e7c4d7b5e5319f5b2f\": container with ID starting with 5b3ed471a4d727be1f677ba198b86003829db5a52dc710e7c4d7b5e5319f5b2f not found: ID does not exist" containerID="5b3ed471a4d727be1f677ba198b86003829db5a52dc710e7c4d7b5e5319f5b2f" Nov 24 14:14:37 crc kubenswrapper[5039]: I1124 14:14:37.500865 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b3ed471a4d727be1f677ba198b86003829db5a52dc710e7c4d7b5e5319f5b2f"} err="failed to get container status \"5b3ed471a4d727be1f677ba198b86003829db5a52dc710e7c4d7b5e5319f5b2f\": rpc error: code = NotFound desc = could not find container \"5b3ed471a4d727be1f677ba198b86003829db5a52dc710e7c4d7b5e5319f5b2f\": container with ID starting with 5b3ed471a4d727be1f677ba198b86003829db5a52dc710e7c4d7b5e5319f5b2f not found: ID does not exist" Nov 24 14:14:38 crc kubenswrapper[5039]: I1124 14:14:38.319174 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c962a656-154e-4824-8a62-2342b585931f" path="/var/lib/kubelet/pods/c962a656-154e-4824-8a62-2342b585931f/volumes" Nov 24 14:14:40 crc kubenswrapper[5039]: I1124 14:14:40.423986 5039 generic.go:334] "Generic (PLEG): container finished" podID="c4761a4e-a177-4629-812b-8f940a7c5b98" containerID="97154af0e6c9d2fca3a5996a1461cfa46078d262f973e2f9053c1828f86e9b87" exitCode=0 Nov 24 14:14:40 crc kubenswrapper[5039]: 
I1124 14:14:40.424115 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" event={"ID":"c4761a4e-a177-4629-812b-8f940a7c5b98","Type":"ContainerDied","Data":"97154af0e6c9d2fca3a5996a1461cfa46078d262f973e2f9053c1828f86e9b87"} Nov 24 14:14:41 crc kubenswrapper[5039]: I1124 14:14:41.929090 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" Nov 24 14:14:41 crc kubenswrapper[5039]: I1124 14:14:41.953557 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c4761a4e-a177-4629-812b-8f940a7c5b98-inventory\") pod \"c4761a4e-a177-4629-812b-8f940a7c5b98\" (UID: \"c4761a4e-a177-4629-812b-8f940a7c5b98\") " Nov 24 14:14:41 crc kubenswrapper[5039]: I1124 14:14:41.953687 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c4761a4e-a177-4629-812b-8f940a7c5b98-ceph\") pod \"c4761a4e-a177-4629-812b-8f940a7c5b98\" (UID: \"c4761a4e-a177-4629-812b-8f940a7c5b98\") " Nov 24 14:14:41 crc kubenswrapper[5039]: I1124 14:14:41.953781 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c4761a4e-a177-4629-812b-8f940a7c5b98-ssh-key\") pod \"c4761a4e-a177-4629-812b-8f940a7c5b98\" (UID: \"c4761a4e-a177-4629-812b-8f940a7c5b98\") " Nov 24 14:14:41 crc kubenswrapper[5039]: I1124 14:14:41.953826 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pwjzq\" (UniqueName: \"kubernetes.io/projected/c4761a4e-a177-4629-812b-8f940a7c5b98-kube-api-access-pwjzq\") pod \"c4761a4e-a177-4629-812b-8f940a7c5b98\" (UID: \"c4761a4e-a177-4629-812b-8f940a7c5b98\") " Nov 24 14:14:41 crc kubenswrapper[5039]: I1124 14:14:41.959623 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4761a4e-a177-4629-812b-8f940a7c5b98-ceph" (OuterVolumeSpecName: "ceph") pod "c4761a4e-a177-4629-812b-8f940a7c5b98" (UID: "c4761a4e-a177-4629-812b-8f940a7c5b98"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:14:41 crc kubenswrapper[5039]: I1124 14:14:41.961652 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4761a4e-a177-4629-812b-8f940a7c5b98-kube-api-access-pwjzq" (OuterVolumeSpecName: "kube-api-access-pwjzq") pod "c4761a4e-a177-4629-812b-8f940a7c5b98" (UID: "c4761a4e-a177-4629-812b-8f940a7c5b98"). InnerVolumeSpecName "kube-api-access-pwjzq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:14:41 crc kubenswrapper[5039]: I1124 14:14:41.994018 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4761a4e-a177-4629-812b-8f940a7c5b98-inventory" (OuterVolumeSpecName: "inventory") pod "c4761a4e-a177-4629-812b-8f940a7c5b98" (UID: "c4761a4e-a177-4629-812b-8f940a7c5b98"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.028710 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4761a4e-a177-4629-812b-8f940a7c5b98-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c4761a4e-a177-4629-812b-8f940a7c5b98" (UID: "c4761a4e-a177-4629-812b-8f940a7c5b98"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.056566 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c4761a4e-a177-4629-812b-8f940a7c5b98-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.056610 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c4761a4e-a177-4629-812b-8f940a7c5b98-ceph\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.056624 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c4761a4e-a177-4629-812b-8f940a7c5b98-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.056636 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pwjzq\" (UniqueName: \"kubernetes.io/projected/c4761a4e-a177-4629-812b-8f940a7c5b98-kube-api-access-pwjzq\") on node \"crc\" DevicePath \"\"" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.442744 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" event={"ID":"c4761a4e-a177-4629-812b-8f940a7c5b98","Type":"ContainerDied","Data":"f6cb1ef69e5af1d13b683954a1c254cc1e5ce4a7316aa706143b9f14edb77c14"} Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.442786 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f6cb1ef69e5af1d13b683954a1c254cc1e5ce4a7316aa706143b9f14edb77c14" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.442797 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.529917 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4"] Nov 24 14:14:42 crc kubenswrapper[5039]: E1124 14:14:42.530425 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c962a656-154e-4824-8a62-2342b585931f" containerName="extract-utilities" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.530451 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="c962a656-154e-4824-8a62-2342b585931f" containerName="extract-utilities" Nov 24 14:14:42 crc kubenswrapper[5039]: E1124 14:14:42.530475 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c962a656-154e-4824-8a62-2342b585931f" containerName="extract-content" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.530484 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="c962a656-154e-4824-8a62-2342b585931f" containerName="extract-content" Nov 24 14:14:42 crc kubenswrapper[5039]: E1124 14:14:42.530531 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4761a4e-a177-4629-812b-8f940a7c5b98" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.530544 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4761a4e-a177-4629-812b-8f940a7c5b98" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Nov 24 14:14:42 crc kubenswrapper[5039]: E1124 14:14:42.530580 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c962a656-154e-4824-8a62-2342b585931f" containerName="registry-server" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 
14:14:42.530590 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="c962a656-154e-4824-8a62-2342b585931f" containerName="registry-server" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.530859 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4761a4e-a177-4629-812b-8f940a7c5b98" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.530887 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="c962a656-154e-4824-8a62-2342b585931f" containerName="registry-server" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.531979 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.535546 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.535928 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.536084 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.536360 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.536389 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.542013 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.570536 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h8rn4\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.570632 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h8rn4\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.570692 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h8rn4\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.570770 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h8rn4\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.570840 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h8rn4\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.570868 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6w4f6\" (UniqueName: \"kubernetes.io/projected/a1fa909b-2535-405a-9969-fc0ca9ff77fc-kube-api-access-6w4f6\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h8rn4\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.572973 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4"] Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.672836 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h8rn4\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.672888 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6w4f6\" (UniqueName: \"kubernetes.io/projected/a1fa909b-2535-405a-9969-fc0ca9ff77fc-kube-api-access-6w4f6\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h8rn4\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.673000 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h8rn4\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.673072 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h8rn4\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.673126 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h8rn4\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.673184 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h8rn4\" (UID: 
\"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.674179 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h8rn4\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.677118 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h8rn4\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.677192 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h8rn4\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.677558 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h8rn4\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.678042 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h8rn4\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.691211 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6w4f6\" (UniqueName: \"kubernetes.io/projected/a1fa909b-2535-405a-9969-fc0ca9ff77fc-kube-api-access-6w4f6\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h8rn4\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:14:42 crc kubenswrapper[5039]: I1124 14:14:42.852923 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:14:43 crc kubenswrapper[5039]: I1124 14:14:43.370936 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4"] Nov 24 14:14:43 crc kubenswrapper[5039]: I1124 14:14:43.452962 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" event={"ID":"a1fa909b-2535-405a-9969-fc0ca9ff77fc","Type":"ContainerStarted","Data":"62a9d25fd3891ff7b138353684fa0dff9689f6c7b165043969ea264bbee9b61b"} Nov 24 14:14:44 crc kubenswrapper[5039]: I1124 14:14:44.461306 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" event={"ID":"a1fa909b-2535-405a-9969-fc0ca9ff77fc","Type":"ContainerStarted","Data":"03e31144fda886ae5daff4b551d122d69aeea87d6eb5b278228b1115b1e438d1"} Nov 24 14:14:44 crc kubenswrapper[5039]: I1124 14:14:44.480891 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" podStartSLOduration=1.821031889 podStartE2EDuration="2.48086085s" podCreationTimestamp="2025-11-24 14:14:42 +0000 UTC" firstStartedPulling="2025-11-24 14:14:43.377384843 +0000 UTC m=+3395.816509343" lastFinishedPulling="2025-11-24 14:14:44.037213804 +0000 UTC m=+3396.476338304" observedRunningTime="2025-11-24 14:14:44.478251136 +0000 UTC m=+3396.917375656" watchObservedRunningTime="2025-11-24 14:14:44.48086085 +0000 UTC m=+3396.919985350" Nov 24 14:14:50 crc kubenswrapper[5039]: I1124 14:14:50.101783 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:14:50 crc kubenswrapper[5039]: I1124 14:14:50.103147 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 14:14:50 crc kubenswrapper[5039]: I1124 14:14:50.103272 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 14:14:50 crc kubenswrapper[5039]: I1124 14:14:50.104166 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1622d9e91171952ff1f7ba0ab24928decb597098c11dc9185b79112e31ea3dec"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 14:14:50 crc kubenswrapper[5039]: I1124 14:14:50.104338 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://1622d9e91171952ff1f7ba0ab24928decb597098c11dc9185b79112e31ea3dec" gracePeriod=600 Nov 24 14:14:50 crc kubenswrapper[5039]: I1124 14:14:50.526796 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" 
containerID="1622d9e91171952ff1f7ba0ab24928decb597098c11dc9185b79112e31ea3dec" exitCode=0 Nov 24 14:14:50 crc kubenswrapper[5039]: I1124 14:14:50.526836 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"1622d9e91171952ff1f7ba0ab24928decb597098c11dc9185b79112e31ea3dec"} Nov 24 14:14:50 crc kubenswrapper[5039]: I1124 14:14:50.527093 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a"} Nov 24 14:14:50 crc kubenswrapper[5039]: I1124 14:14:50.527116 5039 scope.go:117] "RemoveContainer" containerID="fa57cca9605e72098496ff7cf9f0590df560a743b0c7c51fd2cccae495bb4d36" Nov 24 14:15:00 crc kubenswrapper[5039]: I1124 14:15:00.167906 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm"] Nov 24 14:15:00 crc kubenswrapper[5039]: I1124 14:15:00.169983 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm" Nov 24 14:15:00 crc kubenswrapper[5039]: I1124 14:15:00.173143 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 24 14:15:00 crc kubenswrapper[5039]: I1124 14:15:00.173779 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 24 14:15:00 crc kubenswrapper[5039]: I1124 14:15:00.179966 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm"] Nov 24 14:15:00 crc kubenswrapper[5039]: I1124 14:15:00.281795 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8dd6f7c3-881e-4acb-9563-af143fdea78e-config-volume\") pod \"collect-profiles-29399895-l4dvm\" (UID: \"8dd6f7c3-881e-4acb-9563-af143fdea78e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm" Nov 24 14:15:00 crc kubenswrapper[5039]: I1124 14:15:00.281878 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mb54j\" (UniqueName: \"kubernetes.io/projected/8dd6f7c3-881e-4acb-9563-af143fdea78e-kube-api-access-mb54j\") pod \"collect-profiles-29399895-l4dvm\" (UID: \"8dd6f7c3-881e-4acb-9563-af143fdea78e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm" Nov 24 14:15:00 crc kubenswrapper[5039]: I1124 14:15:00.281926 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8dd6f7c3-881e-4acb-9563-af143fdea78e-secret-volume\") pod \"collect-profiles-29399895-l4dvm\" (UID: \"8dd6f7c3-881e-4acb-9563-af143fdea78e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm" Nov 24 14:15:00 crc kubenswrapper[5039]: I1124 14:15:00.384292 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8dd6f7c3-881e-4acb-9563-af143fdea78e-config-volume\") pod 
\"collect-profiles-29399895-l4dvm\" (UID: \"8dd6f7c3-881e-4acb-9563-af143fdea78e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm" Nov 24 14:15:00 crc kubenswrapper[5039]: I1124 14:15:00.384389 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mb54j\" (UniqueName: \"kubernetes.io/projected/8dd6f7c3-881e-4acb-9563-af143fdea78e-kube-api-access-mb54j\") pod \"collect-profiles-29399895-l4dvm\" (UID: \"8dd6f7c3-881e-4acb-9563-af143fdea78e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm" Nov 24 14:15:00 crc kubenswrapper[5039]: I1124 14:15:00.384428 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8dd6f7c3-881e-4acb-9563-af143fdea78e-secret-volume\") pod \"collect-profiles-29399895-l4dvm\" (UID: \"8dd6f7c3-881e-4acb-9563-af143fdea78e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm" Nov 24 14:15:00 crc kubenswrapper[5039]: I1124 14:15:00.386055 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8dd6f7c3-881e-4acb-9563-af143fdea78e-config-volume\") pod \"collect-profiles-29399895-l4dvm\" (UID: \"8dd6f7c3-881e-4acb-9563-af143fdea78e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm" Nov 24 14:15:00 crc kubenswrapper[5039]: I1124 14:15:00.395458 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8dd6f7c3-881e-4acb-9563-af143fdea78e-secret-volume\") pod \"collect-profiles-29399895-l4dvm\" (UID: \"8dd6f7c3-881e-4acb-9563-af143fdea78e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm" Nov 24 14:15:00 crc kubenswrapper[5039]: I1124 14:15:00.401689 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mb54j\" (UniqueName: \"kubernetes.io/projected/8dd6f7c3-881e-4acb-9563-af143fdea78e-kube-api-access-mb54j\") pod \"collect-profiles-29399895-l4dvm\" (UID: \"8dd6f7c3-881e-4acb-9563-af143fdea78e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm" Nov 24 14:15:00 crc kubenswrapper[5039]: I1124 14:15:00.501585 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm" Nov 24 14:15:00 crc kubenswrapper[5039]: I1124 14:15:00.960473 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm"] Nov 24 14:15:00 crc kubenswrapper[5039]: W1124 14:15:00.965714 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8dd6f7c3_881e_4acb_9563_af143fdea78e.slice/crio-f68b0cb6ce27cf807ba003ffeefacbc362eefab9fb5fbd653d94843292c38570 WatchSource:0}: Error finding container f68b0cb6ce27cf807ba003ffeefacbc362eefab9fb5fbd653d94843292c38570: Status 404 returned error can't find the container with id f68b0cb6ce27cf807ba003ffeefacbc362eefab9fb5fbd653d94843292c38570 Nov 24 14:15:01 crc kubenswrapper[5039]: I1124 14:15:01.651610 5039 generic.go:334] "Generic (PLEG): container finished" podID="8dd6f7c3-881e-4acb-9563-af143fdea78e" containerID="f67ce7c9795a4ea3eef0e343c0e79040e0de30dde476d91dcfdebe81e36cb04b" exitCode=0 Nov 24 14:15:01 crc kubenswrapper[5039]: I1124 14:15:01.651789 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm" event={"ID":"8dd6f7c3-881e-4acb-9563-af143fdea78e","Type":"ContainerDied","Data":"f67ce7c9795a4ea3eef0e343c0e79040e0de30dde476d91dcfdebe81e36cb04b"} Nov 24 14:15:01 crc kubenswrapper[5039]: I1124 14:15:01.651966 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm" event={"ID":"8dd6f7c3-881e-4acb-9563-af143fdea78e","Type":"ContainerStarted","Data":"f68b0cb6ce27cf807ba003ffeefacbc362eefab9fb5fbd653d94843292c38570"} Nov 24 14:15:03 crc kubenswrapper[5039]: I1124 14:15:03.366839 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm" Nov 24 14:15:03 crc kubenswrapper[5039]: I1124 14:15:03.551480 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8dd6f7c3-881e-4acb-9563-af143fdea78e-config-volume\") pod \"8dd6f7c3-881e-4acb-9563-af143fdea78e\" (UID: \"8dd6f7c3-881e-4acb-9563-af143fdea78e\") " Nov 24 14:15:03 crc kubenswrapper[5039]: I1124 14:15:03.551583 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8dd6f7c3-881e-4acb-9563-af143fdea78e-secret-volume\") pod \"8dd6f7c3-881e-4acb-9563-af143fdea78e\" (UID: \"8dd6f7c3-881e-4acb-9563-af143fdea78e\") " Nov 24 14:15:03 crc kubenswrapper[5039]: I1124 14:15:03.552137 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8dd6f7c3-881e-4acb-9563-af143fdea78e-config-volume" (OuterVolumeSpecName: "config-volume") pod "8dd6f7c3-881e-4acb-9563-af143fdea78e" (UID: "8dd6f7c3-881e-4acb-9563-af143fdea78e"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 14:15:03 crc kubenswrapper[5039]: I1124 14:15:03.552914 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mb54j\" (UniqueName: \"kubernetes.io/projected/8dd6f7c3-881e-4acb-9563-af143fdea78e-kube-api-access-mb54j\") pod \"8dd6f7c3-881e-4acb-9563-af143fdea78e\" (UID: \"8dd6f7c3-881e-4acb-9563-af143fdea78e\") " Nov 24 14:15:03 crc kubenswrapper[5039]: I1124 14:15:03.553439 5039 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8dd6f7c3-881e-4acb-9563-af143fdea78e-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 14:15:03 crc kubenswrapper[5039]: I1124 14:15:03.563188 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dd6f7c3-881e-4acb-9563-af143fdea78e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8dd6f7c3-881e-4acb-9563-af143fdea78e" (UID: "8dd6f7c3-881e-4acb-9563-af143fdea78e"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:15:03 crc kubenswrapper[5039]: I1124 14:15:03.565470 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8dd6f7c3-881e-4acb-9563-af143fdea78e-kube-api-access-mb54j" (OuterVolumeSpecName: "kube-api-access-mb54j") pod "8dd6f7c3-881e-4acb-9563-af143fdea78e" (UID: "8dd6f7c3-881e-4acb-9563-af143fdea78e"). InnerVolumeSpecName "kube-api-access-mb54j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:15:03 crc kubenswrapper[5039]: I1124 14:15:03.655145 5039 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8dd6f7c3-881e-4acb-9563-af143fdea78e-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 24 14:15:03 crc kubenswrapper[5039]: I1124 14:15:03.655182 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mb54j\" (UniqueName: \"kubernetes.io/projected/8dd6f7c3-881e-4acb-9563-af143fdea78e-kube-api-access-mb54j\") on node \"crc\" DevicePath \"\"" Nov 24 14:15:03 crc kubenswrapper[5039]: I1124 14:15:03.673286 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm" event={"ID":"8dd6f7c3-881e-4acb-9563-af143fdea78e","Type":"ContainerDied","Data":"f68b0cb6ce27cf807ba003ffeefacbc362eefab9fb5fbd653d94843292c38570"} Nov 24 14:15:03 crc kubenswrapper[5039]: I1124 14:15:03.673341 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f68b0cb6ce27cf807ba003ffeefacbc362eefab9fb5fbd653d94843292c38570" Nov 24 14:15:03 crc kubenswrapper[5039]: I1124 14:15:03.673378 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm" Nov 24 14:15:04 crc kubenswrapper[5039]: I1124 14:15:04.457460 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4"] Nov 24 14:15:04 crc kubenswrapper[5039]: I1124 14:15:04.467108 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399850-kwqf4"] Nov 24 14:15:05 crc kubenswrapper[5039]: I1124 14:15:05.907455 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-p2v87"] Nov 24 14:15:05 crc kubenswrapper[5039]: E1124 14:15:05.910148 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dd6f7c3-881e-4acb-9563-af143fdea78e" containerName="collect-profiles" Nov 24 14:15:05 crc kubenswrapper[5039]: I1124 14:15:05.910348 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dd6f7c3-881e-4acb-9563-af143fdea78e" containerName="collect-profiles" Nov 24 14:15:05 crc kubenswrapper[5039]: I1124 14:15:05.911078 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="8dd6f7c3-881e-4acb-9563-af143fdea78e" containerName="collect-profiles" Nov 24 14:15:05 crc kubenswrapper[5039]: I1124 14:15:05.914458 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p2v87" Nov 24 14:15:05 crc kubenswrapper[5039]: I1124 14:15:05.916893 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2v87"] Nov 24 14:15:06 crc kubenswrapper[5039]: I1124 14:15:06.103213 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b71aaa20-b9e9-4b40-ad65-00949337cc0a-utilities\") pod \"redhat-marketplace-p2v87\" (UID: \"b71aaa20-b9e9-4b40-ad65-00949337cc0a\") " pod="openshift-marketplace/redhat-marketplace-p2v87" Nov 24 14:15:06 crc kubenswrapper[5039]: I1124 14:15:06.103363 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnqhj\" (UniqueName: \"kubernetes.io/projected/b71aaa20-b9e9-4b40-ad65-00949337cc0a-kube-api-access-gnqhj\") pod \"redhat-marketplace-p2v87\" (UID: \"b71aaa20-b9e9-4b40-ad65-00949337cc0a\") " pod="openshift-marketplace/redhat-marketplace-p2v87" Nov 24 14:15:06 crc kubenswrapper[5039]: I1124 14:15:06.103465 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b71aaa20-b9e9-4b40-ad65-00949337cc0a-catalog-content\") pod \"redhat-marketplace-p2v87\" (UID: \"b71aaa20-b9e9-4b40-ad65-00949337cc0a\") " pod="openshift-marketplace/redhat-marketplace-p2v87" Nov 24 14:15:06 crc kubenswrapper[5039]: I1124 14:15:06.205954 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b71aaa20-b9e9-4b40-ad65-00949337cc0a-catalog-content\") pod \"redhat-marketplace-p2v87\" (UID: \"b71aaa20-b9e9-4b40-ad65-00949337cc0a\") " pod="openshift-marketplace/redhat-marketplace-p2v87" Nov 24 14:15:06 crc kubenswrapper[5039]: I1124 14:15:06.206050 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b71aaa20-b9e9-4b40-ad65-00949337cc0a-utilities\") pod \"redhat-marketplace-p2v87\" (UID: 
\"b71aaa20-b9e9-4b40-ad65-00949337cc0a\") " pod="openshift-marketplace/redhat-marketplace-p2v87" Nov 24 14:15:06 crc kubenswrapper[5039]: I1124 14:15:06.206138 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnqhj\" (UniqueName: \"kubernetes.io/projected/b71aaa20-b9e9-4b40-ad65-00949337cc0a-kube-api-access-gnqhj\") pod \"redhat-marketplace-p2v87\" (UID: \"b71aaa20-b9e9-4b40-ad65-00949337cc0a\") " pod="openshift-marketplace/redhat-marketplace-p2v87" Nov 24 14:15:06 crc kubenswrapper[5039]: I1124 14:15:06.206565 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b71aaa20-b9e9-4b40-ad65-00949337cc0a-catalog-content\") pod \"redhat-marketplace-p2v87\" (UID: \"b71aaa20-b9e9-4b40-ad65-00949337cc0a\") " pod="openshift-marketplace/redhat-marketplace-p2v87" Nov 24 14:15:06 crc kubenswrapper[5039]: I1124 14:15:06.206762 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b71aaa20-b9e9-4b40-ad65-00949337cc0a-utilities\") pod \"redhat-marketplace-p2v87\" (UID: \"b71aaa20-b9e9-4b40-ad65-00949337cc0a\") " pod="openshift-marketplace/redhat-marketplace-p2v87" Nov 24 14:15:06 crc kubenswrapper[5039]: I1124 14:15:06.227276 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnqhj\" (UniqueName: \"kubernetes.io/projected/b71aaa20-b9e9-4b40-ad65-00949337cc0a-kube-api-access-gnqhj\") pod \"redhat-marketplace-p2v87\" (UID: \"b71aaa20-b9e9-4b40-ad65-00949337cc0a\") " pod="openshift-marketplace/redhat-marketplace-p2v87" Nov 24 14:15:06 crc kubenswrapper[5039]: I1124 14:15:06.250477 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p2v87" Nov 24 14:15:06 crc kubenswrapper[5039]: I1124 14:15:06.351280 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8b02714-17dd-49f0-8cbe-2c61d3123d77" path="/var/lib/kubelet/pods/f8b02714-17dd-49f0-8cbe-2c61d3123d77/volumes" Nov 24 14:15:06 crc kubenswrapper[5039]: I1124 14:15:06.759949 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2v87"] Nov 24 14:15:07 crc kubenswrapper[5039]: I1124 14:15:07.715500 5039 generic.go:334] "Generic (PLEG): container finished" podID="b71aaa20-b9e9-4b40-ad65-00949337cc0a" containerID="a1ab0b9e019920f8f895dafa48078761bd11f305cf55fde171fba81464b2dd02" exitCode=0 Nov 24 14:15:07 crc kubenswrapper[5039]: I1124 14:15:07.716086 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2v87" event={"ID":"b71aaa20-b9e9-4b40-ad65-00949337cc0a","Type":"ContainerDied","Data":"a1ab0b9e019920f8f895dafa48078761bd11f305cf55fde171fba81464b2dd02"} Nov 24 14:15:07 crc kubenswrapper[5039]: I1124 14:15:07.716117 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2v87" event={"ID":"b71aaa20-b9e9-4b40-ad65-00949337cc0a","Type":"ContainerStarted","Data":"3031719ad941fc16691830b01161c8dc6ae72d36b8c2d30b6b4f6f87bd1e5582"} Nov 24 14:15:09 crc kubenswrapper[5039]: I1124 14:15:09.735033 5039 generic.go:334] "Generic (PLEG): container finished" podID="b71aaa20-b9e9-4b40-ad65-00949337cc0a" containerID="3d0a02e3e61256bc9da3b4f8da2ef95b353b8a3f54dba8f3008bbfed54290dcd" exitCode=0 Nov 24 14:15:09 crc kubenswrapper[5039]: I1124 14:15:09.735131 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2v87" event={"ID":"b71aaa20-b9e9-4b40-ad65-00949337cc0a","Type":"ContainerDied","Data":"3d0a02e3e61256bc9da3b4f8da2ef95b353b8a3f54dba8f3008bbfed54290dcd"} Nov 24 14:15:10 crc kubenswrapper[5039]: I1124 14:15:10.750769 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2v87" event={"ID":"b71aaa20-b9e9-4b40-ad65-00949337cc0a","Type":"ContainerStarted","Data":"4135ea27fe9cb1d688e7a4baacf54bf33c772aa8fa556a1f62dd2fea5c4061e6"} Nov 24 14:15:10 crc kubenswrapper[5039]: I1124 14:15:10.782125 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-p2v87" podStartSLOduration=3.166563578 podStartE2EDuration="5.78210935s" podCreationTimestamp="2025-11-24 14:15:05 +0000 UTC" firstStartedPulling="2025-11-24 14:15:07.718670137 +0000 UTC m=+3420.157794637" lastFinishedPulling="2025-11-24 14:15:10.334215909 +0000 UTC m=+3422.773340409" observedRunningTime="2025-11-24 14:15:10.778572152 +0000 UTC m=+3423.217696672" watchObservedRunningTime="2025-11-24 14:15:10.78210935 +0000 UTC m=+3423.221233850" Nov 24 14:15:16 crc kubenswrapper[5039]: I1124 14:15:16.251745 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-p2v87" Nov 24 14:15:16 crc kubenswrapper[5039]: I1124 14:15:16.252453 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-p2v87" Nov 24 14:15:16 crc kubenswrapper[5039]: I1124 14:15:16.326791 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-p2v87" Nov 24 14:15:16 crc kubenswrapper[5039]: I1124 
14:15:16.852772 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-p2v87" Nov 24 14:15:16 crc kubenswrapper[5039]: I1124 14:15:16.915811 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2v87"] Nov 24 14:15:18 crc kubenswrapper[5039]: I1124 14:15:18.886332 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-p2v87" podUID="b71aaa20-b9e9-4b40-ad65-00949337cc0a" containerName="registry-server" containerID="cri-o://4135ea27fe9cb1d688e7a4baacf54bf33c772aa8fa556a1f62dd2fea5c4061e6" gracePeriod=2 Nov 24 14:15:19 crc kubenswrapper[5039]: I1124 14:15:19.384427 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p2v87" Nov 24 14:15:19 crc kubenswrapper[5039]: I1124 14:15:19.506691 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b71aaa20-b9e9-4b40-ad65-00949337cc0a-utilities\") pod \"b71aaa20-b9e9-4b40-ad65-00949337cc0a\" (UID: \"b71aaa20-b9e9-4b40-ad65-00949337cc0a\") " Nov 24 14:15:19 crc kubenswrapper[5039]: I1124 14:15:19.506935 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b71aaa20-b9e9-4b40-ad65-00949337cc0a-catalog-content\") pod \"b71aaa20-b9e9-4b40-ad65-00949337cc0a\" (UID: \"b71aaa20-b9e9-4b40-ad65-00949337cc0a\") " Nov 24 14:15:19 crc kubenswrapper[5039]: I1124 14:15:19.506996 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnqhj\" (UniqueName: \"kubernetes.io/projected/b71aaa20-b9e9-4b40-ad65-00949337cc0a-kube-api-access-gnqhj\") pod \"b71aaa20-b9e9-4b40-ad65-00949337cc0a\" (UID: \"b71aaa20-b9e9-4b40-ad65-00949337cc0a\") " Nov 24 14:15:19 crc kubenswrapper[5039]: I1124 14:15:19.508732 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b71aaa20-b9e9-4b40-ad65-00949337cc0a-utilities" (OuterVolumeSpecName: "utilities") pod "b71aaa20-b9e9-4b40-ad65-00949337cc0a" (UID: "b71aaa20-b9e9-4b40-ad65-00949337cc0a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:15:19 crc kubenswrapper[5039]: I1124 14:15:19.514023 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b71aaa20-b9e9-4b40-ad65-00949337cc0a-kube-api-access-gnqhj" (OuterVolumeSpecName: "kube-api-access-gnqhj") pod "b71aaa20-b9e9-4b40-ad65-00949337cc0a" (UID: "b71aaa20-b9e9-4b40-ad65-00949337cc0a"). InnerVolumeSpecName "kube-api-access-gnqhj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:15:19 crc kubenswrapper[5039]: I1124 14:15:19.539269 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b71aaa20-b9e9-4b40-ad65-00949337cc0a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b71aaa20-b9e9-4b40-ad65-00949337cc0a" (UID: "b71aaa20-b9e9-4b40-ad65-00949337cc0a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:15:19 crc kubenswrapper[5039]: I1124 14:15:19.612351 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnqhj\" (UniqueName: \"kubernetes.io/projected/b71aaa20-b9e9-4b40-ad65-00949337cc0a-kube-api-access-gnqhj\") on node \"crc\" DevicePath \"\"" Nov 24 14:15:19 crc kubenswrapper[5039]: I1124 14:15:19.612400 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b71aaa20-b9e9-4b40-ad65-00949337cc0a-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 14:15:19 crc kubenswrapper[5039]: I1124 14:15:19.612414 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b71aaa20-b9e9-4b40-ad65-00949337cc0a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 14:15:19 crc kubenswrapper[5039]: I1124 14:15:19.899120 5039 generic.go:334] "Generic (PLEG): container finished" podID="b71aaa20-b9e9-4b40-ad65-00949337cc0a" containerID="4135ea27fe9cb1d688e7a4baacf54bf33c772aa8fa556a1f62dd2fea5c4061e6" exitCode=0 Nov 24 14:15:19 crc kubenswrapper[5039]: I1124 14:15:19.899161 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2v87" event={"ID":"b71aaa20-b9e9-4b40-ad65-00949337cc0a","Type":"ContainerDied","Data":"4135ea27fe9cb1d688e7a4baacf54bf33c772aa8fa556a1f62dd2fea5c4061e6"} Nov 24 14:15:19 crc kubenswrapper[5039]: I1124 14:15:19.899175 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p2v87" Nov 24 14:15:19 crc kubenswrapper[5039]: I1124 14:15:19.899185 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2v87" event={"ID":"b71aaa20-b9e9-4b40-ad65-00949337cc0a","Type":"ContainerDied","Data":"3031719ad941fc16691830b01161c8dc6ae72d36b8c2d30b6b4f6f87bd1e5582"} Nov 24 14:15:19 crc kubenswrapper[5039]: I1124 14:15:19.899201 5039 scope.go:117] "RemoveContainer" containerID="4135ea27fe9cb1d688e7a4baacf54bf33c772aa8fa556a1f62dd2fea5c4061e6" Nov 24 14:15:19 crc kubenswrapper[5039]: I1124 14:15:19.932175 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2v87"] Nov 24 14:15:19 crc kubenswrapper[5039]: I1124 14:15:19.934597 5039 scope.go:117] "RemoveContainer" containerID="3d0a02e3e61256bc9da3b4f8da2ef95b353b8a3f54dba8f3008bbfed54290dcd" Nov 24 14:15:19 crc kubenswrapper[5039]: I1124 14:15:19.941249 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2v87"] Nov 24 14:15:19 crc kubenswrapper[5039]: I1124 14:15:19.969652 5039 scope.go:117] "RemoveContainer" containerID="a1ab0b9e019920f8f895dafa48078761bd11f305cf55fde171fba81464b2dd02" Nov 24 14:15:20 crc kubenswrapper[5039]: I1124 14:15:20.020810 5039 scope.go:117] "RemoveContainer" containerID="4135ea27fe9cb1d688e7a4baacf54bf33c772aa8fa556a1f62dd2fea5c4061e6" Nov 24 14:15:20 crc kubenswrapper[5039]: E1124 14:15:20.021266 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4135ea27fe9cb1d688e7a4baacf54bf33c772aa8fa556a1f62dd2fea5c4061e6\": container with ID starting with 4135ea27fe9cb1d688e7a4baacf54bf33c772aa8fa556a1f62dd2fea5c4061e6 not found: ID does not exist" containerID="4135ea27fe9cb1d688e7a4baacf54bf33c772aa8fa556a1f62dd2fea5c4061e6" Nov 24 14:15:20 crc kubenswrapper[5039]: I1124 14:15:20.021309 5039 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4135ea27fe9cb1d688e7a4baacf54bf33c772aa8fa556a1f62dd2fea5c4061e6"} err="failed to get container status \"4135ea27fe9cb1d688e7a4baacf54bf33c772aa8fa556a1f62dd2fea5c4061e6\": rpc error: code = NotFound desc = could not find container \"4135ea27fe9cb1d688e7a4baacf54bf33c772aa8fa556a1f62dd2fea5c4061e6\": container with ID starting with 4135ea27fe9cb1d688e7a4baacf54bf33c772aa8fa556a1f62dd2fea5c4061e6 not found: ID does not exist" Nov 24 14:15:20 crc kubenswrapper[5039]: I1124 14:15:20.021337 5039 scope.go:117] "RemoveContainer" containerID="3d0a02e3e61256bc9da3b4f8da2ef95b353b8a3f54dba8f3008bbfed54290dcd" Nov 24 14:15:20 crc kubenswrapper[5039]: E1124 14:15:20.021705 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d0a02e3e61256bc9da3b4f8da2ef95b353b8a3f54dba8f3008bbfed54290dcd\": container with ID starting with 3d0a02e3e61256bc9da3b4f8da2ef95b353b8a3f54dba8f3008bbfed54290dcd not found: ID does not exist" containerID="3d0a02e3e61256bc9da3b4f8da2ef95b353b8a3f54dba8f3008bbfed54290dcd" Nov 24 14:15:20 crc kubenswrapper[5039]: I1124 14:15:20.021755 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d0a02e3e61256bc9da3b4f8da2ef95b353b8a3f54dba8f3008bbfed54290dcd"} err="failed to get container status \"3d0a02e3e61256bc9da3b4f8da2ef95b353b8a3f54dba8f3008bbfed54290dcd\": rpc error: code = NotFound desc = could not find container \"3d0a02e3e61256bc9da3b4f8da2ef95b353b8a3f54dba8f3008bbfed54290dcd\": container with ID starting with 3d0a02e3e61256bc9da3b4f8da2ef95b353b8a3f54dba8f3008bbfed54290dcd not found: ID does not exist" Nov 24 14:15:20 crc kubenswrapper[5039]: I1124 14:15:20.021791 5039 scope.go:117] "RemoveContainer" containerID="a1ab0b9e019920f8f895dafa48078761bd11f305cf55fde171fba81464b2dd02" Nov 24 14:15:20 crc kubenswrapper[5039]: E1124 14:15:20.022054 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1ab0b9e019920f8f895dafa48078761bd11f305cf55fde171fba81464b2dd02\": container with ID starting with a1ab0b9e019920f8f895dafa48078761bd11f305cf55fde171fba81464b2dd02 not found: ID does not exist" containerID="a1ab0b9e019920f8f895dafa48078761bd11f305cf55fde171fba81464b2dd02" Nov 24 14:15:20 crc kubenswrapper[5039]: I1124 14:15:20.022085 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1ab0b9e019920f8f895dafa48078761bd11f305cf55fde171fba81464b2dd02"} err="failed to get container status \"a1ab0b9e019920f8f895dafa48078761bd11f305cf55fde171fba81464b2dd02\": rpc error: code = NotFound desc = could not find container \"a1ab0b9e019920f8f895dafa48078761bd11f305cf55fde171fba81464b2dd02\": container with ID starting with a1ab0b9e019920f8f895dafa48078761bd11f305cf55fde171fba81464b2dd02 not found: ID does not exist" Nov 24 14:15:20 crc kubenswrapper[5039]: I1124 14:15:20.322006 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b71aaa20-b9e9-4b40-ad65-00949337cc0a" path="/var/lib/kubelet/pods/b71aaa20-b9e9-4b40-ad65-00949337cc0a/volumes" Nov 24 14:15:51 crc kubenswrapper[5039]: I1124 14:15:51.959240 5039 scope.go:117] "RemoveContainer" containerID="d5242c347ed24e17b516433160bff47edb8971f9b4473fdcc9da59c8e3dc229b" Nov 24 14:16:01 crc kubenswrapper[5039]: I1124 14:16:01.419568 5039 generic.go:334] "Generic (PLEG): container finished" 
podID="a1fa909b-2535-405a-9969-fc0ca9ff77fc" containerID="03e31144fda886ae5daff4b551d122d69aeea87d6eb5b278228b1115b1e438d1" exitCode=0 Nov 24 14:16:01 crc kubenswrapper[5039]: I1124 14:16:01.419699 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" event={"ID":"a1fa909b-2535-405a-9969-fc0ca9ff77fc","Type":"ContainerDied","Data":"03e31144fda886ae5daff4b551d122d69aeea87d6eb5b278228b1115b1e438d1"} Nov 24 14:16:03 crc kubenswrapper[5039]: I1124 14:16:03.718506 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:16:03 crc kubenswrapper[5039]: I1124 14:16:03.801370 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-inventory\") pod \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " Nov 24 14:16:03 crc kubenswrapper[5039]: I1124 14:16:03.801465 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6w4f6\" (UniqueName: \"kubernetes.io/projected/a1fa909b-2535-405a-9969-fc0ca9ff77fc-kube-api-access-6w4f6\") pod \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " Nov 24 14:16:03 crc kubenswrapper[5039]: I1124 14:16:03.801573 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ceph\") pod \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " Nov 24 14:16:03 crc kubenswrapper[5039]: I1124 14:16:03.801615 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ovncontroller-config-0\") pod \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " Nov 24 14:16:03 crc kubenswrapper[5039]: I1124 14:16:03.801768 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ovn-combined-ca-bundle\") pod \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " Nov 24 14:16:03 crc kubenswrapper[5039]: I1124 14:16:03.801811 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ssh-key\") pod \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\" (UID: \"a1fa909b-2535-405a-9969-fc0ca9ff77fc\") " Nov 24 14:16:03 crc kubenswrapper[5039]: I1124 14:16:03.807453 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "a1fa909b-2535-405a-9969-fc0ca9ff77fc" (UID: "a1fa909b-2535-405a-9969-fc0ca9ff77fc"). InnerVolumeSpecName "ovn-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:16:03 crc kubenswrapper[5039]: I1124 14:16:03.808411 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1fa909b-2535-405a-9969-fc0ca9ff77fc-kube-api-access-6w4f6" (OuterVolumeSpecName: "kube-api-access-6w4f6") pod "a1fa909b-2535-405a-9969-fc0ca9ff77fc" (UID: "a1fa909b-2535-405a-9969-fc0ca9ff77fc"). InnerVolumeSpecName "kube-api-access-6w4f6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:16:03 crc kubenswrapper[5039]: I1124 14:16:03.810171 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ceph" (OuterVolumeSpecName: "ceph") pod "a1fa909b-2535-405a-9969-fc0ca9ff77fc" (UID: "a1fa909b-2535-405a-9969-fc0ca9ff77fc"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:16:03 crc kubenswrapper[5039]: I1124 14:16:03.833736 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "a1fa909b-2535-405a-9969-fc0ca9ff77fc" (UID: "a1fa909b-2535-405a-9969-fc0ca9ff77fc"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 14:16:03 crc kubenswrapper[5039]: I1124 14:16:03.836885 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-inventory" (OuterVolumeSpecName: "inventory") pod "a1fa909b-2535-405a-9969-fc0ca9ff77fc" (UID: "a1fa909b-2535-405a-9969-fc0ca9ff77fc"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:16:03 crc kubenswrapper[5039]: I1124 14:16:03.837253 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a1fa909b-2535-405a-9969-fc0ca9ff77fc" (UID: "a1fa909b-2535-405a-9969-fc0ca9ff77fc"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:16:03 crc kubenswrapper[5039]: I1124 14:16:03.905027 5039 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:16:03 crc kubenswrapper[5039]: I1124 14:16:03.905057 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 14:16:03 crc kubenswrapper[5039]: I1124 14:16:03.905066 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 14:16:03 crc kubenswrapper[5039]: I1124 14:16:03.905075 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6w4f6\" (UniqueName: \"kubernetes.io/projected/a1fa909b-2535-405a-9969-fc0ca9ff77fc-kube-api-access-6w4f6\") on node \"crc\" DevicePath \"\"" Nov 24 14:16:03 crc kubenswrapper[5039]: I1124 14:16:03.905084 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ceph\") on node \"crc\" DevicePath \"\"" Nov 24 14:16:03 crc kubenswrapper[5039]: I1124 14:16:03.905093 5039 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/a1fa909b-2535-405a-9969-fc0ca9ff77fc-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:16:04 crc kubenswrapper[5039]: I1124 14:16:04.450088 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" event={"ID":"a1fa909b-2535-405a-9969-fc0ca9ff77fc","Type":"ContainerDied","Data":"62a9d25fd3891ff7b138353684fa0dff9689f6c7b165043969ea264bbee9b61b"} Nov 24 14:16:04 crc kubenswrapper[5039]: I1124 14:16:04.450565 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="62a9d25fd3891ff7b138353684fa0dff9689f6c7b165043969ea264bbee9b61b" Nov 24 14:16:04 crc kubenswrapper[5039]: I1124 14:16:04.450336 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h8rn4" Nov 24 14:16:04 crc kubenswrapper[5039]: I1124 14:16:04.877277 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7"] Nov 24 14:16:04 crc kubenswrapper[5039]: E1124 14:16:04.878064 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b71aaa20-b9e9-4b40-ad65-00949337cc0a" containerName="registry-server" Nov 24 14:16:04 crc kubenswrapper[5039]: I1124 14:16:04.878081 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b71aaa20-b9e9-4b40-ad65-00949337cc0a" containerName="registry-server" Nov 24 14:16:04 crc kubenswrapper[5039]: E1124 14:16:04.878092 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b71aaa20-b9e9-4b40-ad65-00949337cc0a" containerName="extract-utilities" Nov 24 14:16:04 crc kubenswrapper[5039]: I1124 14:16:04.878101 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b71aaa20-b9e9-4b40-ad65-00949337cc0a" containerName="extract-utilities" Nov 24 14:16:04 crc kubenswrapper[5039]: E1124 14:16:04.878149 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1fa909b-2535-405a-9969-fc0ca9ff77fc" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 24 14:16:04 crc kubenswrapper[5039]: I1124 14:16:04.878157 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1fa909b-2535-405a-9969-fc0ca9ff77fc" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 24 14:16:04 crc kubenswrapper[5039]: E1124 14:16:04.878175 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b71aaa20-b9e9-4b40-ad65-00949337cc0a" containerName="extract-content" Nov 24 14:16:04 crc kubenswrapper[5039]: I1124 14:16:04.878182 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b71aaa20-b9e9-4b40-ad65-00949337cc0a" containerName="extract-content" Nov 24 14:16:04 crc kubenswrapper[5039]: I1124 14:16:04.878453 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="b71aaa20-b9e9-4b40-ad65-00949337cc0a" containerName="registry-server" Nov 24 14:16:04 crc kubenswrapper[5039]: I1124 14:16:04.878474 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1fa909b-2535-405a-9969-fc0ca9ff77fc" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 24 14:16:04 crc kubenswrapper[5039]: I1124 14:16:04.879591 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:04 crc kubenswrapper[5039]: I1124 14:16:04.882832 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 24 14:16:04 crc kubenswrapper[5039]: I1124 14:16:04.883231 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 14:16:04 crc kubenswrapper[5039]: I1124 14:16:04.885448 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 24 14:16:04 crc kubenswrapper[5039]: I1124 14:16:04.885933 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 24 14:16:04 crc kubenswrapper[5039]: I1124 14:16:04.886215 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 14:16:04 crc kubenswrapper[5039]: I1124 14:16:04.886440 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 14:16:04 crc kubenswrapper[5039]: I1124 14:16:04.887286 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 14:16:04 crc kubenswrapper[5039]: I1124 14:16:04.892538 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7"] Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.030747 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.030804 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.030872 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pgsgk\" (UniqueName: \"kubernetes.io/projected/bb9ef170-1b1b-4027-9ac0-b0e67efda529-kube-api-access-pgsgk\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.030916 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.030938 
5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.031016 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.031046 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.132730 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.132790 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.132851 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.132876 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.132941 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pgsgk\" (UniqueName: \"kubernetes.io/projected/bb9ef170-1b1b-4027-9ac0-b0e67efda529-kube-api-access-pgsgk\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.132984 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.133004 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.136372 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.137567 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.137838 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.138159 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.138532 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.146489 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-neutron-ovn-metadata-agent-neutron-config-0\") pod 
\"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.150133 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pgsgk\" (UniqueName: \"kubernetes.io/projected/bb9ef170-1b1b-4027-9ac0-b0e67efda529-kube-api-access-pgsgk\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.231644 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:16:05 crc kubenswrapper[5039]: I1124 14:16:05.813768 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7"] Nov 24 14:16:06 crc kubenswrapper[5039]: I1124 14:16:06.470313 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" event={"ID":"bb9ef170-1b1b-4027-9ac0-b0e67efda529","Type":"ContainerStarted","Data":"f3ca554a3e5afe99358cf79f6bf154d4a8747467250ed23b0c356017a5008d09"} Nov 24 14:16:07 crc kubenswrapper[5039]: I1124 14:16:07.490874 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" event={"ID":"bb9ef170-1b1b-4027-9ac0-b0e67efda529","Type":"ContainerStarted","Data":"e48a21a5da9f906595c22b9e799a877dbf3ba065c4fb3a26f4f5c3684f0eaa1d"} Nov 24 14:16:07 crc kubenswrapper[5039]: I1124 14:16:07.523087 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" podStartSLOduration=3.049583714 podStartE2EDuration="3.523056991s" podCreationTimestamp="2025-11-24 14:16:04 +0000 UTC" firstStartedPulling="2025-11-24 14:16:05.811188342 +0000 UTC m=+3478.250312842" lastFinishedPulling="2025-11-24 14:16:06.284661609 +0000 UTC m=+3478.723786119" observedRunningTime="2025-11-24 14:16:07.509680383 +0000 UTC m=+3479.948804883" watchObservedRunningTime="2025-11-24 14:16:07.523056991 +0000 UTC m=+3479.962181491" Nov 24 14:16:50 crc kubenswrapper[5039]: I1124 14:16:50.102296 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:16:50 crc kubenswrapper[5039]: I1124 14:16:50.102998 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 14:17:19 crc kubenswrapper[5039]: I1124 14:17:19.228475 5039 generic.go:334] "Generic (PLEG): container finished" podID="bb9ef170-1b1b-4027-9ac0-b0e67efda529" containerID="e48a21a5da9f906595c22b9e799a877dbf3ba065c4fb3a26f4f5c3684f0eaa1d" exitCode=0 Nov 24 14:17:19 crc kubenswrapper[5039]: I1124 14:17:19.228574 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" event={"ID":"bb9ef170-1b1b-4027-9ac0-b0e67efda529","Type":"ContainerDied","Data":"e48a21a5da9f906595c22b9e799a877dbf3ba065c4fb3a26f4f5c3684f0eaa1d"} Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.101100 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.101444 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.760023 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.806399 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-inventory\") pod \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.806572 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-neutron-ovn-metadata-agent-neutron-config-0\") pod \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.806641 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-neutron-metadata-combined-ca-bundle\") pod \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.806673 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-nova-metadata-neutron-config-0\") pod \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.806696 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-ceph\") pod \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.806779 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pgsgk\" (UniqueName: \"kubernetes.io/projected/bb9ef170-1b1b-4027-9ac0-b0e67efda529-kube-api-access-pgsgk\") pod \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.806806 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-ssh-key\") pod \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\" (UID: \"bb9ef170-1b1b-4027-9ac0-b0e67efda529\") " Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.812862 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "bb9ef170-1b1b-4027-9ac0-b0e67efda529" (UID: "bb9ef170-1b1b-4027-9ac0-b0e67efda529"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.820713 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb9ef170-1b1b-4027-9ac0-b0e67efda529-kube-api-access-pgsgk" (OuterVolumeSpecName: "kube-api-access-pgsgk") pod "bb9ef170-1b1b-4027-9ac0-b0e67efda529" (UID: "bb9ef170-1b1b-4027-9ac0-b0e67efda529"). InnerVolumeSpecName "kube-api-access-pgsgk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.829737 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-ceph" (OuterVolumeSpecName: "ceph") pod "bb9ef170-1b1b-4027-9ac0-b0e67efda529" (UID: "bb9ef170-1b1b-4027-9ac0-b0e67efda529"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.840109 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "bb9ef170-1b1b-4027-9ac0-b0e67efda529" (UID: "bb9ef170-1b1b-4027-9ac0-b0e67efda529"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.842267 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-inventory" (OuterVolumeSpecName: "inventory") pod "bb9ef170-1b1b-4027-9ac0-b0e67efda529" (UID: "bb9ef170-1b1b-4027-9ac0-b0e67efda529"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.844659 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "bb9ef170-1b1b-4027-9ac0-b0e67efda529" (UID: "bb9ef170-1b1b-4027-9ac0-b0e67efda529"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.872037 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "bb9ef170-1b1b-4027-9ac0-b0e67efda529" (UID: "bb9ef170-1b1b-4027-9ac0-b0e67efda529"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.912922 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.912961 5039 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.912975 5039 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.912987 5039 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.912998 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-ceph\") on node \"crc\" DevicePath \"\"" Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.913007 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pgsgk\" (UniqueName: \"kubernetes.io/projected/bb9ef170-1b1b-4027-9ac0-b0e67efda529-kube-api-access-pgsgk\") on node \"crc\" DevicePath \"\"" Nov 24 14:17:20 crc kubenswrapper[5039]: I1124 14:17:20.913016 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bb9ef170-1b1b-4027-9ac0-b0e67efda529-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.249866 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" event={"ID":"bb9ef170-1b1b-4027-9ac0-b0e67efda529","Type":"ContainerDied","Data":"f3ca554a3e5afe99358cf79f6bf154d4a8747467250ed23b0c356017a5008d09"} Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.250139 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f3ca554a3e5afe99358cf79f6bf154d4a8747467250ed23b0c356017a5008d09" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.249981 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.361472 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f"] Nov 24 14:17:21 crc kubenswrapper[5039]: E1124 14:17:21.362176 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb9ef170-1b1b-4027-9ac0-b0e67efda529" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.362211 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb9ef170-1b1b-4027-9ac0-b0e67efda529" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.362498 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb9ef170-1b1b-4027-9ac0-b0e67efda529" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.363475 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.375319 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f"] Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.377889 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.377925 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.378212 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.378686 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.385949 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.392755 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.422382 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5s78f\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.422441 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5s78f\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.422601 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5s78f\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.422830 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5s78f\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.422895 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwtj2\" (UniqueName: \"kubernetes.io/projected/09487809-1d9c-44f7-81e0-91d56354f51c-kube-api-access-wwtj2\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5s78f\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.422925 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5s78f\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.525617 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5s78f\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.526026 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwtj2\" (UniqueName: \"kubernetes.io/projected/09487809-1d9c-44f7-81e0-91d56354f51c-kube-api-access-wwtj2\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5s78f\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.526115 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5s78f\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.526379 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5s78f\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.526447 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5s78f\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.526620 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5s78f\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.530990 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5s78f\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.531594 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5s78f\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.531672 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5s78f\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.532173 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5s78f\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.534171 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5s78f\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.542971 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwtj2\" (UniqueName: \"kubernetes.io/projected/09487809-1d9c-44f7-81e0-91d56354f51c-kube-api-access-wwtj2\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5s78f\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:17:21 crc kubenswrapper[5039]: I1124 14:17:21.699803 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:17:22 crc kubenswrapper[5039]: I1124 14:17:22.252963 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f"] Nov 24 14:17:22 crc kubenswrapper[5039]: I1124 14:17:22.259914 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" event={"ID":"09487809-1d9c-44f7-81e0-91d56354f51c","Type":"ContainerStarted","Data":"c15402915792100ba65bb2cb7e944e67db59a736d6f3eded3c3baeb2386bcf9b"} Nov 24 14:17:23 crc kubenswrapper[5039]: I1124 14:17:23.274199 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" event={"ID":"09487809-1d9c-44f7-81e0-91d56354f51c","Type":"ContainerStarted","Data":"966ac531f37affb48bbceb4cd6960df283c067c5bcf617e5a0e9641495c0620a"} Nov 24 14:17:23 crc kubenswrapper[5039]: I1124 14:17:23.292441 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" podStartSLOduration=1.849414024 podStartE2EDuration="2.292423405s" podCreationTimestamp="2025-11-24 14:17:21 +0000 UTC" firstStartedPulling="2025-11-24 14:17:22.250486985 +0000 UTC m=+3554.689611485" lastFinishedPulling="2025-11-24 14:17:22.693496366 +0000 UTC m=+3555.132620866" observedRunningTime="2025-11-24 14:17:23.290698053 +0000 UTC m=+3555.729822563" watchObservedRunningTime="2025-11-24 14:17:23.292423405 +0000 UTC m=+3555.731547915" Nov 24 14:17:50 crc kubenswrapper[5039]: I1124 14:17:50.102496 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:17:50 crc kubenswrapper[5039]: I1124 14:17:50.103075 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 14:17:50 crc kubenswrapper[5039]: I1124 14:17:50.103131 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 14:17:50 crc kubenswrapper[5039]: I1124 14:17:50.103972 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 14:17:50 crc kubenswrapper[5039]: I1124 14:17:50.104040 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a" gracePeriod=600 Nov 24 14:17:50 crc kubenswrapper[5039]: E1124 14:17:50.228924 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:17:50 crc kubenswrapper[5039]: I1124 14:17:50.610615 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a" exitCode=0 Nov 24 14:17:50 crc kubenswrapper[5039]: I1124 14:17:50.610675 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a"} Nov 24 14:17:50 crc kubenswrapper[5039]: I1124 14:17:50.610721 5039 scope.go:117] "RemoveContainer" containerID="1622d9e91171952ff1f7ba0ab24928decb597098c11dc9185b79112e31ea3dec" Nov 24 14:17:50 crc kubenswrapper[5039]: I1124 14:17:50.612095 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a" Nov 24 14:17:50 crc kubenswrapper[5039]: E1124 14:17:50.613205 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:18:04 crc kubenswrapper[5039]: I1124 14:18:04.307542 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a" Nov 24 14:18:04 crc kubenswrapper[5039]: E1124 14:18:04.308599 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:18:19 crc kubenswrapper[5039]: I1124 14:18:19.307851 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a" Nov 24 14:18:19 crc kubenswrapper[5039]: E1124 14:18:19.309805 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:18:34 crc kubenswrapper[5039]: I1124 14:18:34.307105 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a" Nov 24 14:18:34 crc kubenswrapper[5039]: E1124 14:18:34.307986 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
Nov 24 14:18:34 crc kubenswrapper[5039]: E1124 14:18:34.307986 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:18:48 crc kubenswrapper[5039]: I1124 14:18:48.313731 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a"
Nov 24 14:18:48 crc kubenswrapper[5039]: E1124 14:18:48.314540 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:19:00 crc kubenswrapper[5039]: I1124 14:19:00.307540 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a"
Nov 24 14:19:00 crc kubenswrapper[5039]: E1124 14:19:00.308645 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:19:14 crc kubenswrapper[5039]: I1124 14:19:14.306964 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a"
Nov 24 14:19:14 crc kubenswrapper[5039]: E1124 14:19:14.308303 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:19:29 crc kubenswrapper[5039]: I1124 14:19:29.306572 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a"
Nov 24 14:19:29 crc kubenswrapper[5039]: E1124 14:19:29.307702 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:19:40 crc kubenswrapper[5039]: I1124 14:19:40.306385 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a"
Nov 24 14:19:40 crc kubenswrapper[5039]: E1124 14:19:40.307245 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:19:53 crc kubenswrapper[5039]: I1124 14:19:53.307138 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a"
Nov 24 14:19:53 crc kubenswrapper[5039]: E1124 14:19:53.307985 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:20:05 crc kubenswrapper[5039]: I1124 14:20:05.307208 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a"
Nov 24 14:20:05 crc kubenswrapper[5039]: E1124 14:20:05.307945 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:20:11 crc kubenswrapper[5039]: I1124 14:20:11.082175 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6qbwh"]
Nov 24 14:20:11 crc kubenswrapper[5039]: I1124 14:20:11.086131 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6qbwh"
Nov 24 14:20:11 crc kubenswrapper[5039]: I1124 14:20:11.101926 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6qbwh"]
Nov 24 14:20:11 crc kubenswrapper[5039]: I1124 14:20:11.156583 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzf49\" (UniqueName: \"kubernetes.io/projected/80a90a41-705c-45e9-86e5-4a678f44202a-kube-api-access-mzf49\") pod \"redhat-operators-6qbwh\" (UID: \"80a90a41-705c-45e9-86e5-4a678f44202a\") " pod="openshift-marketplace/redhat-operators-6qbwh"
Nov 24 14:20:11 crc kubenswrapper[5039]: I1124 14:20:11.157052 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80a90a41-705c-45e9-86e5-4a678f44202a-utilities\") pod \"redhat-operators-6qbwh\" (UID: \"80a90a41-705c-45e9-86e5-4a678f44202a\") " pod="openshift-marketplace/redhat-operators-6qbwh"
Nov 24 14:20:11 crc kubenswrapper[5039]: I1124 14:20:11.157096 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80a90a41-705c-45e9-86e5-4a678f44202a-catalog-content\") pod \"redhat-operators-6qbwh\" (UID: \"80a90a41-705c-45e9-86e5-4a678f44202a\") " pod="openshift-marketplace/redhat-operators-6qbwh"
Nov 24 14:20:11 crc kubenswrapper[5039]: I1124 14:20:11.257947 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzf49\" (UniqueName: \"kubernetes.io/projected/80a90a41-705c-45e9-86e5-4a678f44202a-kube-api-access-mzf49\") pod \"redhat-operators-6qbwh\" (UID: \"80a90a41-705c-45e9-86e5-4a678f44202a\") " pod="openshift-marketplace/redhat-operators-6qbwh"
Nov 24 14:20:11 crc kubenswrapper[5039]: I1124 14:20:11.258083 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80a90a41-705c-45e9-86e5-4a678f44202a-utilities\") pod \"redhat-operators-6qbwh\" (UID: \"80a90a41-705c-45e9-86e5-4a678f44202a\") " pod="openshift-marketplace/redhat-operators-6qbwh"
Nov 24 14:20:11 crc kubenswrapper[5039]: I1124 14:20:11.258118 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80a90a41-705c-45e9-86e5-4a678f44202a-catalog-content\") pod \"redhat-operators-6qbwh\" (UID: \"80a90a41-705c-45e9-86e5-4a678f44202a\") " pod="openshift-marketplace/redhat-operators-6qbwh"
Nov 24 14:20:11 crc kubenswrapper[5039]: I1124 14:20:11.258564 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80a90a41-705c-45e9-86e5-4a678f44202a-catalog-content\") pod \"redhat-operators-6qbwh\" (UID: \"80a90a41-705c-45e9-86e5-4a678f44202a\") " pod="openshift-marketplace/redhat-operators-6qbwh"
Nov 24 14:20:11 crc kubenswrapper[5039]: I1124 14:20:11.259015 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80a90a41-705c-45e9-86e5-4a678f44202a-utilities\") pod \"redhat-operators-6qbwh\" (UID: \"80a90a41-705c-45e9-86e5-4a678f44202a\") " pod="openshift-marketplace/redhat-operators-6qbwh"
Nov 24 14:20:11 crc kubenswrapper[5039]: I1124 14:20:11.286543 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzf49\" (UniqueName: \"kubernetes.io/projected/80a90a41-705c-45e9-86e5-4a678f44202a-kube-api-access-mzf49\") pod \"redhat-operators-6qbwh\" (UID: \"80a90a41-705c-45e9-86e5-4a678f44202a\") " pod="openshift-marketplace/redhat-operators-6qbwh"
Nov 24 14:20:11 crc kubenswrapper[5039]: I1124 14:20:11.413952 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6qbwh"
Nov 24 14:20:11 crc kubenswrapper[5039]: I1124 14:20:11.864329 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6qbwh"]
Nov 24 14:20:12 crc kubenswrapper[5039]: I1124 14:20:12.355611 5039 generic.go:334] "Generic (PLEG): container finished" podID="09487809-1d9c-44f7-81e0-91d56354f51c" containerID="966ac531f37affb48bbceb4cd6960df283c067c5bcf617e5a0e9641495c0620a" exitCode=0
Nov 24 14:20:12 crc kubenswrapper[5039]: I1124 14:20:12.355716 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" event={"ID":"09487809-1d9c-44f7-81e0-91d56354f51c","Type":"ContainerDied","Data":"966ac531f37affb48bbceb4cd6960df283c067c5bcf617e5a0e9641495c0620a"}
Nov 24 14:20:12 crc kubenswrapper[5039]: I1124 14:20:12.358211 5039 generic.go:334] "Generic (PLEG): container finished" podID="80a90a41-705c-45e9-86e5-4a678f44202a" containerID="13890a5539c6d70557709ca9853ad3d7aa7946b9116f787d4400314351ce4fa9" exitCode=0
Nov 24 14:20:12 crc kubenswrapper[5039]: I1124 14:20:12.358248 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6qbwh" event={"ID":"80a90a41-705c-45e9-86e5-4a678f44202a","Type":"ContainerDied","Data":"13890a5539c6d70557709ca9853ad3d7aa7946b9116f787d4400314351ce4fa9"}
Nov 24 14:20:12 crc kubenswrapper[5039]: I1124 14:20:12.358265 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6qbwh" event={"ID":"80a90a41-705c-45e9-86e5-4a678f44202a","Type":"ContainerStarted","Data":"4a5fecc9db7f0570ba536f5296a21b77abc0199b66a410d1aaacd7856ceebc9f"}
Nov 24 14:20:12 crc kubenswrapper[5039]: I1124 14:20:12.360206 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 24 14:20:13 crc kubenswrapper[5039]: I1124 14:20:13.371094 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6qbwh" event={"ID":"80a90a41-705c-45e9-86e5-4a678f44202a","Type":"ContainerStarted","Data":"298a9f2806896366dc47c3fd67f7f2913511709d1a4fe833db643a3cff520d3b"}
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.015310 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-libvirt-combined-ca-bundle\") pod \"09487809-1d9c-44f7-81e0-91d56354f51c\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.015670 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-inventory\") pod \"09487809-1d9c-44f7-81e0-91d56354f51c\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.015800 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-libvirt-secret-0\") pod \"09487809-1d9c-44f7-81e0-91d56354f51c\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.015889 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-ssh-key\") pod \"09487809-1d9c-44f7-81e0-91d56354f51c\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.016025 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-ceph\") pod \"09487809-1d9c-44f7-81e0-91d56354f51c\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.016157 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwtj2\" (UniqueName: \"kubernetes.io/projected/09487809-1d9c-44f7-81e0-91d56354f51c-kube-api-access-wwtj2\") pod \"09487809-1d9c-44f7-81e0-91d56354f51c\" (UID: \"09487809-1d9c-44f7-81e0-91d56354f51c\") " Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.023980 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09487809-1d9c-44f7-81e0-91d56354f51c-kube-api-access-wwtj2" (OuterVolumeSpecName: "kube-api-access-wwtj2") pod "09487809-1d9c-44f7-81e0-91d56354f51c" (UID: "09487809-1d9c-44f7-81e0-91d56354f51c"). InnerVolumeSpecName "kube-api-access-wwtj2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.024632 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "09487809-1d9c-44f7-81e0-91d56354f51c" (UID: "09487809-1d9c-44f7-81e0-91d56354f51c"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.024658 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-ceph" (OuterVolumeSpecName: "ceph") pod "09487809-1d9c-44f7-81e0-91d56354f51c" (UID: "09487809-1d9c-44f7-81e0-91d56354f51c"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.047145 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "09487809-1d9c-44f7-81e0-91d56354f51c" (UID: "09487809-1d9c-44f7-81e0-91d56354f51c"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.048262 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-inventory" (OuterVolumeSpecName: "inventory") pod "09487809-1d9c-44f7-81e0-91d56354f51c" (UID: "09487809-1d9c-44f7-81e0-91d56354f51c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.050381 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "09487809-1d9c-44f7-81e0-91d56354f51c" (UID: "09487809-1d9c-44f7-81e0-91d56354f51c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.118858 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwtj2\" (UniqueName: \"kubernetes.io/projected/09487809-1d9c-44f7-81e0-91d56354f51c-kube-api-access-wwtj2\") on node \"crc\" DevicePath \"\"" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.118894 5039 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.118905 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.118917 5039 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.118925 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.118932 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/09487809-1d9c-44f7-81e0-91d56354f51c-ceph\") on node \"crc\" DevicePath \"\"" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.382766 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.382765 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5s78f" event={"ID":"09487809-1d9c-44f7-81e0-91d56354f51c","Type":"ContainerDied","Data":"c15402915792100ba65bb2cb7e944e67db59a736d6f3eded3c3baeb2386bcf9b"} Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.383342 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c15402915792100ba65bb2cb7e944e67db59a736d6f3eded3c3baeb2386bcf9b" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.496701 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc"] Nov 24 14:20:14 crc kubenswrapper[5039]: E1124 14:20:14.497213 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09487809-1d9c-44f7-81e0-91d56354f51c" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.497230 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="09487809-1d9c-44f7-81e0-91d56354f51c" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.497478 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="09487809-1d9c-44f7-81e0-91d56354f51c" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.498260 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.501219 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.501987 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ceph-nova" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.502013 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.502324 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.502354 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.502589 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.502655 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.504115 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.505785 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc"] Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.508898 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.633267 5039 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.633325 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgvbl\" (UniqueName: \"kubernetes.io/projected/49f0a456-4039-4471-9dd2-c17ea42981e3-kube-api-access-fgvbl\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.633355 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/49f0a456-4039-4471-9dd2-c17ea42981e3-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.633376 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.633425 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.633490 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.633524 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.633552 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-ceph\") pod 
\"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.633571 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.633610 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.633637 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.735842 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.735913 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.735949 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.735983 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgvbl\" (UniqueName: \"kubernetes.io/projected/49f0a456-4039-4471-9dd2-c17ea42981e3-kube-api-access-fgvbl\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc 
kubenswrapper[5039]: I1124 14:20:14.736004 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/49f0a456-4039-4471-9dd2-c17ea42981e3-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.736031 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.736086 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.736145 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.736164 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.736194 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.736214 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.737252 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/49f0a456-4039-4471-9dd2-c17ea42981e3-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " 
pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.737911 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.740673 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.740809 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.741049 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.741654 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.743327 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.744041 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.746008 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-cell1-compute-config-1\") pod 
\"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.746635 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.753681 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgvbl\" (UniqueName: \"kubernetes.io/projected/49f0a456-4039-4471-9dd2-c17ea42981e3-kube-api-access-fgvbl\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:14 crc kubenswrapper[5039]: I1124 14:20:14.827021 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:20:15 crc kubenswrapper[5039]: W1124 14:20:15.196289 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49f0a456_4039_4471_9dd2_c17ea42981e3.slice/crio-84fd498420435e10c1a5890df6f3d70f07a22d7c713b2e86b22108e34adbfbae WatchSource:0}: Error finding container 84fd498420435e10c1a5890df6f3d70f07a22d7c713b2e86b22108e34adbfbae: Status 404 returned error can't find the container with id 84fd498420435e10c1a5890df6f3d70f07a22d7c713b2e86b22108e34adbfbae Nov 24 14:20:15 crc kubenswrapper[5039]: I1124 14:20:15.202662 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc"] Nov 24 14:20:15 crc kubenswrapper[5039]: I1124 14:20:15.396735 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" event={"ID":"49f0a456-4039-4471-9dd2-c17ea42981e3","Type":"ContainerStarted","Data":"84fd498420435e10c1a5890df6f3d70f07a22d7c713b2e86b22108e34adbfbae"} Nov 24 14:20:15 crc kubenswrapper[5039]: I1124 14:20:15.399182 5039 generic.go:334] "Generic (PLEG): container finished" podID="80a90a41-705c-45e9-86e5-4a678f44202a" containerID="298a9f2806896366dc47c3fd67f7f2913511709d1a4fe833db643a3cff520d3b" exitCode=0 Nov 24 14:20:15 crc kubenswrapper[5039]: I1124 14:20:15.399210 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6qbwh" event={"ID":"80a90a41-705c-45e9-86e5-4a678f44202a","Type":"ContainerDied","Data":"298a9f2806896366dc47c3fd67f7f2913511709d1a4fe833db643a3cff520d3b"} Nov 24 14:20:16 crc kubenswrapper[5039]: I1124 14:20:16.410038 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" event={"ID":"49f0a456-4039-4471-9dd2-c17ea42981e3","Type":"ContainerStarted","Data":"1a158b30adf6baf9ec40e1f6ed69664209ad03d9fdc592e23aa6015d33df96b0"} Nov 24 14:20:16 crc kubenswrapper[5039]: I1124 14:20:16.412778 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6qbwh" 
event={"ID":"80a90a41-705c-45e9-86e5-4a678f44202a","Type":"ContainerStarted","Data":"50239e8b9500cca378be6d22140c4d89c2bdf0e0ea1a3ebbd1633831bb5425ac"} Nov 24 14:20:16 crc kubenswrapper[5039]: I1124 14:20:16.459057 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" podStartSLOduration=1.745526439 podStartE2EDuration="2.459037397s" podCreationTimestamp="2025-11-24 14:20:14 +0000 UTC" firstStartedPulling="2025-11-24 14:20:15.198827775 +0000 UTC m=+3727.637952275" lastFinishedPulling="2025-11-24 14:20:15.912338723 +0000 UTC m=+3728.351463233" observedRunningTime="2025-11-24 14:20:16.451721678 +0000 UTC m=+3728.890846178" watchObservedRunningTime="2025-11-24 14:20:16.459037397 +0000 UTC m=+3728.898161897" Nov 24 14:20:20 crc kubenswrapper[5039]: I1124 14:20:20.306402 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a" Nov 24 14:20:20 crc kubenswrapper[5039]: E1124 14:20:20.308698 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:20:21 crc kubenswrapper[5039]: I1124 14:20:21.414395 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6qbwh" Nov 24 14:20:21 crc kubenswrapper[5039]: I1124 14:20:21.414901 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6qbwh" Nov 24 14:20:22 crc kubenswrapper[5039]: I1124 14:20:22.463008 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6qbwh" podUID="80a90a41-705c-45e9-86e5-4a678f44202a" containerName="registry-server" probeResult="failure" output=< Nov 24 14:20:22 crc kubenswrapper[5039]: timeout: failed to connect service ":50051" within 1s Nov 24 14:20:22 crc kubenswrapper[5039]: > Nov 24 14:20:31 crc kubenswrapper[5039]: I1124 14:20:31.464911 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6qbwh" Nov 24 14:20:31 crc kubenswrapper[5039]: I1124 14:20:31.489691 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6qbwh" podStartSLOduration=16.987512541 podStartE2EDuration="20.489671742s" podCreationTimestamp="2025-11-24 14:20:11 +0000 UTC" firstStartedPulling="2025-11-24 14:20:12.359963163 +0000 UTC m=+3724.799087653" lastFinishedPulling="2025-11-24 14:20:15.862122354 +0000 UTC m=+3728.301246854" observedRunningTime="2025-11-24 14:20:16.475088991 +0000 UTC m=+3728.914213491" watchObservedRunningTime="2025-11-24 14:20:31.489671742 +0000 UTC m=+3743.928796242" Nov 24 14:20:31 crc kubenswrapper[5039]: I1124 14:20:31.533739 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6qbwh" Nov 24 14:20:31 crc kubenswrapper[5039]: I1124 14:20:31.699672 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6qbwh"] Nov 24 14:20:32 crc kubenswrapper[5039]: I1124 14:20:32.587998 5039 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6qbwh" podUID="80a90a41-705c-45e9-86e5-4a678f44202a" containerName="registry-server" containerID="cri-o://50239e8b9500cca378be6d22140c4d89c2bdf0e0ea1a3ebbd1633831bb5425ac" gracePeriod=2 Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.083240 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6qbwh" Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.148292 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mzf49\" (UniqueName: \"kubernetes.io/projected/80a90a41-705c-45e9-86e5-4a678f44202a-kube-api-access-mzf49\") pod \"80a90a41-705c-45e9-86e5-4a678f44202a\" (UID: \"80a90a41-705c-45e9-86e5-4a678f44202a\") " Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.148414 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80a90a41-705c-45e9-86e5-4a678f44202a-utilities\") pod \"80a90a41-705c-45e9-86e5-4a678f44202a\" (UID: \"80a90a41-705c-45e9-86e5-4a678f44202a\") " Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.148494 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80a90a41-705c-45e9-86e5-4a678f44202a-catalog-content\") pod \"80a90a41-705c-45e9-86e5-4a678f44202a\" (UID: \"80a90a41-705c-45e9-86e5-4a678f44202a\") " Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.150006 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80a90a41-705c-45e9-86e5-4a678f44202a-utilities" (OuterVolumeSpecName: "utilities") pod "80a90a41-705c-45e9-86e5-4a678f44202a" (UID: "80a90a41-705c-45e9-86e5-4a678f44202a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.174914 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80a90a41-705c-45e9-86e5-4a678f44202a-kube-api-access-mzf49" (OuterVolumeSpecName: "kube-api-access-mzf49") pod "80a90a41-705c-45e9-86e5-4a678f44202a" (UID: "80a90a41-705c-45e9-86e5-4a678f44202a"). InnerVolumeSpecName "kube-api-access-mzf49". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.233171 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80a90a41-705c-45e9-86e5-4a678f44202a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "80a90a41-705c-45e9-86e5-4a678f44202a" (UID: "80a90a41-705c-45e9-86e5-4a678f44202a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.251813 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mzf49\" (UniqueName: \"kubernetes.io/projected/80a90a41-705c-45e9-86e5-4a678f44202a-kube-api-access-mzf49\") on node \"crc\" DevicePath \"\"" Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.251857 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80a90a41-705c-45e9-86e5-4a678f44202a-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.251869 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80a90a41-705c-45e9-86e5-4a678f44202a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.599407 5039 generic.go:334] "Generic (PLEG): container finished" podID="80a90a41-705c-45e9-86e5-4a678f44202a" containerID="50239e8b9500cca378be6d22140c4d89c2bdf0e0ea1a3ebbd1633831bb5425ac" exitCode=0 Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.599466 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6qbwh" event={"ID":"80a90a41-705c-45e9-86e5-4a678f44202a","Type":"ContainerDied","Data":"50239e8b9500cca378be6d22140c4d89c2bdf0e0ea1a3ebbd1633831bb5425ac"} Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.599548 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6qbwh" Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.599575 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6qbwh" event={"ID":"80a90a41-705c-45e9-86e5-4a678f44202a","Type":"ContainerDied","Data":"4a5fecc9db7f0570ba536f5296a21b77abc0199b66a410d1aaacd7856ceebc9f"} Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.599604 5039 scope.go:117] "RemoveContainer" containerID="50239e8b9500cca378be6d22140c4d89c2bdf0e0ea1a3ebbd1633831bb5425ac" Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.626359 5039 scope.go:117] "RemoveContainer" containerID="298a9f2806896366dc47c3fd67f7f2913511709d1a4fe833db643a3cff520d3b" Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.648404 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6qbwh"] Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.657377 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6qbwh"] Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.687743 5039 scope.go:117] "RemoveContainer" containerID="13890a5539c6d70557709ca9853ad3d7aa7946b9116f787d4400314351ce4fa9" Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.711979 5039 scope.go:117] "RemoveContainer" containerID="50239e8b9500cca378be6d22140c4d89c2bdf0e0ea1a3ebbd1633831bb5425ac" Nov 24 14:20:33 crc kubenswrapper[5039]: E1124 14:20:33.712441 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"50239e8b9500cca378be6d22140c4d89c2bdf0e0ea1a3ebbd1633831bb5425ac\": container with ID starting with 50239e8b9500cca378be6d22140c4d89c2bdf0e0ea1a3ebbd1633831bb5425ac not found: ID does not exist" containerID="50239e8b9500cca378be6d22140c4d89c2bdf0e0ea1a3ebbd1633831bb5425ac" Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.712475 5039 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50239e8b9500cca378be6d22140c4d89c2bdf0e0ea1a3ebbd1633831bb5425ac"} err="failed to get container status \"50239e8b9500cca378be6d22140c4d89c2bdf0e0ea1a3ebbd1633831bb5425ac\": rpc error: code = NotFound desc = could not find container \"50239e8b9500cca378be6d22140c4d89c2bdf0e0ea1a3ebbd1633831bb5425ac\": container with ID starting with 50239e8b9500cca378be6d22140c4d89c2bdf0e0ea1a3ebbd1633831bb5425ac not found: ID does not exist" Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.712525 5039 scope.go:117] "RemoveContainer" containerID="298a9f2806896366dc47c3fd67f7f2913511709d1a4fe833db643a3cff520d3b" Nov 24 14:20:33 crc kubenswrapper[5039]: E1124 14:20:33.712988 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"298a9f2806896366dc47c3fd67f7f2913511709d1a4fe833db643a3cff520d3b\": container with ID starting with 298a9f2806896366dc47c3fd67f7f2913511709d1a4fe833db643a3cff520d3b not found: ID does not exist" containerID="298a9f2806896366dc47c3fd67f7f2913511709d1a4fe833db643a3cff520d3b" Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.713039 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"298a9f2806896366dc47c3fd67f7f2913511709d1a4fe833db643a3cff520d3b"} err="failed to get container status \"298a9f2806896366dc47c3fd67f7f2913511709d1a4fe833db643a3cff520d3b\": rpc error: code = NotFound desc = could not find container \"298a9f2806896366dc47c3fd67f7f2913511709d1a4fe833db643a3cff520d3b\": container with ID starting with 298a9f2806896366dc47c3fd67f7f2913511709d1a4fe833db643a3cff520d3b not found: ID does not exist" Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.713076 5039 scope.go:117] "RemoveContainer" containerID="13890a5539c6d70557709ca9853ad3d7aa7946b9116f787d4400314351ce4fa9" Nov 24 14:20:33 crc kubenswrapper[5039]: E1124 14:20:33.713607 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13890a5539c6d70557709ca9853ad3d7aa7946b9116f787d4400314351ce4fa9\": container with ID starting with 13890a5539c6d70557709ca9853ad3d7aa7946b9116f787d4400314351ce4fa9 not found: ID does not exist" containerID="13890a5539c6d70557709ca9853ad3d7aa7946b9116f787d4400314351ce4fa9" Nov 24 14:20:33 crc kubenswrapper[5039]: I1124 14:20:33.713641 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13890a5539c6d70557709ca9853ad3d7aa7946b9116f787d4400314351ce4fa9"} err="failed to get container status \"13890a5539c6d70557709ca9853ad3d7aa7946b9116f787d4400314351ce4fa9\": rpc error: code = NotFound desc = could not find container \"13890a5539c6d70557709ca9853ad3d7aa7946b9116f787d4400314351ce4fa9\": container with ID starting with 13890a5539c6d70557709ca9853ad3d7aa7946b9116f787d4400314351ce4fa9 not found: ID does not exist" Nov 24 14:20:34 crc kubenswrapper[5039]: I1124 14:20:34.324940 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80a90a41-705c-45e9-86e5-4a678f44202a" path="/var/lib/kubelet/pods/80a90a41-705c-45e9-86e5-4a678f44202a/volumes" Nov 24 14:20:35 crc kubenswrapper[5039]: I1124 14:20:35.307545 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a" Nov 24 14:20:35 crc kubenswrapper[5039]: E1124 14:20:35.308444 5039 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:20:48 crc kubenswrapper[5039]: I1124 14:20:48.318848 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a" Nov 24 14:20:48 crc kubenswrapper[5039]: E1124 14:20:48.319889 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:21:02 crc kubenswrapper[5039]: I1124 14:21:02.307603 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a" Nov 24 14:21:02 crc kubenswrapper[5039]: E1124 14:21:02.309494 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:21:15 crc kubenswrapper[5039]: I1124 14:21:15.308115 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a" Nov 24 14:21:15 crc kubenswrapper[5039]: E1124 14:21:15.309743 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:21:27 crc kubenswrapper[5039]: I1124 14:21:27.306579 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a" Nov 24 14:21:27 crc kubenswrapper[5039]: E1124 14:21:27.307751 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:21:40 crc kubenswrapper[5039]: I1124 14:21:40.307192 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a" Nov 24 14:21:40 crc kubenswrapper[5039]: E1124 14:21:40.309858 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:21:55 crc kubenswrapper[5039]: I1124 14:21:55.307871 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a" Nov 24 14:21:55 crc kubenswrapper[5039]: E1124 14:21:55.308884 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:22:08 crc kubenswrapper[5039]: I1124 14:22:08.307356 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a" Nov 24 14:22:08 crc kubenswrapper[5039]: E1124 14:22:08.308115 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:22:22 crc kubenswrapper[5039]: I1124 14:22:22.307848 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a" Nov 24 14:22:22 crc kubenswrapper[5039]: E1124 14:22:22.308668 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:22:34 crc kubenswrapper[5039]: I1124 14:22:34.307485 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a" Nov 24 14:22:34 crc kubenswrapper[5039]: E1124 14:22:34.308369 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:22:48 crc kubenswrapper[5039]: I1124 14:22:48.314579 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a" Nov 24 14:22:48 crc kubenswrapper[5039]: E1124 14:22:48.315399 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" 
podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:22:59 crc kubenswrapper[5039]: I1124 14:22:59.307401 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a" Nov 24 14:23:00 crc kubenswrapper[5039]: I1124 14:23:00.202133 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"a44b192acf46995fecc0c4d4f9cc70c29cd209cc1b45f88a225e15065c976530"} Nov 24 14:23:41 crc kubenswrapper[5039]: E1124 14:23:41.253585 5039 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49f0a456_4039_4471_9dd2_c17ea42981e3.slice/crio-conmon-1a158b30adf6baf9ec40e1f6ed69664209ad03d9fdc592e23aa6015d33df96b0.scope\": RecentStats: unable to find data in memory cache]" Nov 24 14:23:41 crc kubenswrapper[5039]: I1124 14:23:41.633185 5039 generic.go:334] "Generic (PLEG): container finished" podID="49f0a456-4039-4471-9dd2-c17ea42981e3" containerID="1a158b30adf6baf9ec40e1f6ed69664209ad03d9fdc592e23aa6015d33df96b0" exitCode=0 Nov 24 14:23:41 crc kubenswrapper[5039]: I1124 14:23:41.633284 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" event={"ID":"49f0a456-4039-4471-9dd2-c17ea42981e3","Type":"ContainerDied","Data":"1a158b30adf6baf9ec40e1f6ed69664209ad03d9fdc592e23aa6015d33df96b0"} Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.099566 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.232540 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-migration-ssh-key-1\") pod \"49f0a456-4039-4471-9dd2-c17ea42981e3\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.232625 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/49f0a456-4039-4471-9dd2-c17ea42981e3-ceph-nova-0\") pod \"49f0a456-4039-4471-9dd2-c17ea42981e3\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.232652 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-cell1-compute-config-0\") pod \"49f0a456-4039-4471-9dd2-c17ea42981e3\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.232715 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-extra-config-0\") pod \"49f0a456-4039-4471-9dd2-c17ea42981e3\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.232753 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-migration-ssh-key-0\") pod 
\"49f0a456-4039-4471-9dd2-c17ea42981e3\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.232772 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-ssh-key\") pod \"49f0a456-4039-4471-9dd2-c17ea42981e3\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.232806 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-custom-ceph-combined-ca-bundle\") pod \"49f0a456-4039-4471-9dd2-c17ea42981e3\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.232872 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-cell1-compute-config-1\") pod \"49f0a456-4039-4471-9dd2-c17ea42981e3\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.232908 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-ceph\") pod \"49f0a456-4039-4471-9dd2-c17ea42981e3\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.232937 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fgvbl\" (UniqueName: \"kubernetes.io/projected/49f0a456-4039-4471-9dd2-c17ea42981e3-kube-api-access-fgvbl\") pod \"49f0a456-4039-4471-9dd2-c17ea42981e3\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.233054 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-inventory\") pod \"49f0a456-4039-4471-9dd2-c17ea42981e3\" (UID: \"49f0a456-4039-4471-9dd2-c17ea42981e3\") " Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.240554 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-ceph" (OuterVolumeSpecName: "ceph") pod "49f0a456-4039-4471-9dd2-c17ea42981e3" (UID: "49f0a456-4039-4471-9dd2-c17ea42981e3"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.241859 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49f0a456-4039-4471-9dd2-c17ea42981e3-kube-api-access-fgvbl" (OuterVolumeSpecName: "kube-api-access-fgvbl") pod "49f0a456-4039-4471-9dd2-c17ea42981e3" (UID: "49f0a456-4039-4471-9dd2-c17ea42981e3"). InnerVolumeSpecName "kube-api-access-fgvbl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.248038 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-custom-ceph-combined-ca-bundle" (OuterVolumeSpecName: "nova-custom-ceph-combined-ca-bundle") pod "49f0a456-4039-4471-9dd2-c17ea42981e3" (UID: "49f0a456-4039-4471-9dd2-c17ea42981e3"). InnerVolumeSpecName "nova-custom-ceph-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.287801 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "49f0a456-4039-4471-9dd2-c17ea42981e3" (UID: "49f0a456-4039-4471-9dd2-c17ea42981e3"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.287953 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "49f0a456-4039-4471-9dd2-c17ea42981e3" (UID: "49f0a456-4039-4471-9dd2-c17ea42981e3"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.288439 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "49f0a456-4039-4471-9dd2-c17ea42981e3" (UID: "49f0a456-4039-4471-9dd2-c17ea42981e3"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.288565 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "49f0a456-4039-4471-9dd2-c17ea42981e3" (UID: "49f0a456-4039-4471-9dd2-c17ea42981e3"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.292295 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-inventory" (OuterVolumeSpecName: "inventory") pod "49f0a456-4039-4471-9dd2-c17ea42981e3" (UID: "49f0a456-4039-4471-9dd2-c17ea42981e3"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.303486 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49f0a456-4039-4471-9dd2-c17ea42981e3-ceph-nova-0" (OuterVolumeSpecName: "ceph-nova-0") pod "49f0a456-4039-4471-9dd2-c17ea42981e3" (UID: "49f0a456-4039-4471-9dd2-c17ea42981e3"). InnerVolumeSpecName "ceph-nova-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.324877 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "49f0a456-4039-4471-9dd2-c17ea42981e3" (UID: "49f0a456-4039-4471-9dd2-c17ea42981e3"). InnerVolumeSpecName "nova-cell1-compute-config-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.326329 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "49f0a456-4039-4471-9dd2-c17ea42981e3" (UID: "49f0a456-4039-4471-9dd2-c17ea42981e3"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.335027 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.335056 5039 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.335068 5039 reconciler_common.go:293] "Volume detached for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/49f0a456-4039-4471-9dd2-c17ea42981e3-ceph-nova-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.335077 5039 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.335085 5039 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.335093 5039 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.335101 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.335110 5039 reconciler_common.go:293] "Volume detached for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-custom-ceph-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.335118 5039 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.335128 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/49f0a456-4039-4471-9dd2-c17ea42981e3-ceph\") on node \"crc\" DevicePath \"\"" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.335136 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fgvbl\" (UniqueName: \"kubernetes.io/projected/49f0a456-4039-4471-9dd2-c17ea42981e3-kube-api-access-fgvbl\") on node \"crc\" DevicePath \"\"" Nov 
24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.653885 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" event={"ID":"49f0a456-4039-4471-9dd2-c17ea42981e3","Type":"ContainerDied","Data":"84fd498420435e10c1a5890df6f3d70f07a22d7c713b2e86b22108e34adbfbae"} Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.654217 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="84fd498420435e10c1a5890df6f3d70f07a22d7c713b2e86b22108e34adbfbae" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.653945 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.774895 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2"] Nov 24 14:23:43 crc kubenswrapper[5039]: E1124 14:23:43.775492 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80a90a41-705c-45e9-86e5-4a678f44202a" containerName="extract-content" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.775529 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="80a90a41-705c-45e9-86e5-4a678f44202a" containerName="extract-content" Nov 24 14:23:43 crc kubenswrapper[5039]: E1124 14:23:43.775561 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49f0a456-4039-4471-9dd2-c17ea42981e3" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.775572 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="49f0a456-4039-4471-9dd2-c17ea42981e3" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 24 14:23:43 crc kubenswrapper[5039]: E1124 14:23:43.775592 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80a90a41-705c-45e9-86e5-4a678f44202a" containerName="extract-utilities" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.775600 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="80a90a41-705c-45e9-86e5-4a678f44202a" containerName="extract-utilities" Nov 24 14:23:43 crc kubenswrapper[5039]: E1124 14:23:43.775630 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80a90a41-705c-45e9-86e5-4a678f44202a" containerName="registry-server" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.775637 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="80a90a41-705c-45e9-86e5-4a678f44202a" containerName="registry-server" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.775910 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="80a90a41-705c-45e9-86e5-4a678f44202a" containerName="registry-server" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.775940 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="49f0a456-4039-4471-9dd2-c17ea42981e3" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.776966 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.779441 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.780246 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.781044 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.781294 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.781463 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.781664 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.787891 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2"] Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.848359 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.848546 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.848607 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.848838 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceph\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.848893 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpj8v\" (UniqueName: \"kubernetes.io/projected/d1d48eba-5a90-4ca3-b298-f19175f93608-kube-api-access-zpj8v\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: 
\"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.849010 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.849088 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.849133 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.951435 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.951494 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.951536 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.952962 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.953289 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.953330 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.953491 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceph\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.953537 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpj8v\" (UniqueName: \"kubernetes.io/projected/d1d48eba-5a90-4ca3-b298-f19175f93608-kube-api-access-zpj8v\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.956068 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.956089 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.957549 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.957756 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.957992 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: 
\"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.958415 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceph\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.958517 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:43 crc kubenswrapper[5039]: I1124 14:23:43.971893 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zpj8v\" (UniqueName: \"kubernetes.io/projected/d1d48eba-5a90-4ca3-b298-f19175f93608-kube-api-access-zpj8v\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:44 crc kubenswrapper[5039]: I1124 14:23:44.101701 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:23:44 crc kubenswrapper[5039]: I1124 14:23:44.878311 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2"] Nov 24 14:23:45 crc kubenswrapper[5039]: I1124 14:23:45.673709 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" event={"ID":"d1d48eba-5a90-4ca3-b298-f19175f93608","Type":"ContainerStarted","Data":"bafe97c8d5d59a9b31a2cd4f7889b9e11c7d8162b6121d4e013bec0dc0d7bd4e"} Nov 24 14:23:45 crc kubenswrapper[5039]: I1124 14:23:45.674029 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" event={"ID":"d1d48eba-5a90-4ca3-b298-f19175f93608","Type":"ContainerStarted","Data":"2bdb613c8bb8f215ffbeb7e8ea5c5badd6415ac2514b1de72fd3dda4316bad0c"} Nov 24 14:23:45 crc kubenswrapper[5039]: I1124 14:23:45.693850 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" podStartSLOduration=2.241516088 podStartE2EDuration="2.693832166s" podCreationTimestamp="2025-11-24 14:23:43 +0000 UTC" firstStartedPulling="2025-11-24 14:23:44.88158391 +0000 UTC m=+3937.320708410" lastFinishedPulling="2025-11-24 14:23:45.333899988 +0000 UTC m=+3937.773024488" observedRunningTime="2025-11-24 14:23:45.693292262 +0000 UTC m=+3938.132416772" watchObservedRunningTime="2025-11-24 14:23:45.693832166 +0000 UTC m=+3938.132956676" Nov 24 14:23:49 crc kubenswrapper[5039]: I1124 14:23:49.155640 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wgck2"] Nov 24 14:23:49 crc kubenswrapper[5039]: I1124 14:23:49.158620 5039 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wgck2" Nov 24 14:23:49 crc kubenswrapper[5039]: I1124 14:23:49.168984 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wgck2"] Nov 24 14:23:49 crc kubenswrapper[5039]: I1124 14:23:49.265045 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb470409-995e-47fd-97cd-8a38152a328c-catalog-content\") pod \"community-operators-wgck2\" (UID: \"fb470409-995e-47fd-97cd-8a38152a328c\") " pod="openshift-marketplace/community-operators-wgck2" Nov 24 14:23:49 crc kubenswrapper[5039]: I1124 14:23:49.265539 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb470409-995e-47fd-97cd-8a38152a328c-utilities\") pod \"community-operators-wgck2\" (UID: \"fb470409-995e-47fd-97cd-8a38152a328c\") " pod="openshift-marketplace/community-operators-wgck2" Nov 24 14:23:49 crc kubenswrapper[5039]: I1124 14:23:49.265616 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcddw\" (UniqueName: \"kubernetes.io/projected/fb470409-995e-47fd-97cd-8a38152a328c-kube-api-access-xcddw\") pod \"community-operators-wgck2\" (UID: \"fb470409-995e-47fd-97cd-8a38152a328c\") " pod="openshift-marketplace/community-operators-wgck2" Nov 24 14:23:49 crc kubenswrapper[5039]: I1124 14:23:49.367365 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb470409-995e-47fd-97cd-8a38152a328c-catalog-content\") pod \"community-operators-wgck2\" (UID: \"fb470409-995e-47fd-97cd-8a38152a328c\") " pod="openshift-marketplace/community-operators-wgck2" Nov 24 14:23:49 crc kubenswrapper[5039]: I1124 14:23:49.367898 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb470409-995e-47fd-97cd-8a38152a328c-catalog-content\") pod \"community-operators-wgck2\" (UID: \"fb470409-995e-47fd-97cd-8a38152a328c\") " pod="openshift-marketplace/community-operators-wgck2" Nov 24 14:23:49 crc kubenswrapper[5039]: I1124 14:23:49.368408 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb470409-995e-47fd-97cd-8a38152a328c-utilities\") pod \"community-operators-wgck2\" (UID: \"fb470409-995e-47fd-97cd-8a38152a328c\") " pod="openshift-marketplace/community-operators-wgck2" Nov 24 14:23:49 crc kubenswrapper[5039]: I1124 14:23:49.368459 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcddw\" (UniqueName: \"kubernetes.io/projected/fb470409-995e-47fd-97cd-8a38152a328c-kube-api-access-xcddw\") pod \"community-operators-wgck2\" (UID: \"fb470409-995e-47fd-97cd-8a38152a328c\") " pod="openshift-marketplace/community-operators-wgck2" Nov 24 14:23:49 crc kubenswrapper[5039]: I1124 14:23:49.368845 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb470409-995e-47fd-97cd-8a38152a328c-utilities\") pod \"community-operators-wgck2\" (UID: \"fb470409-995e-47fd-97cd-8a38152a328c\") " pod="openshift-marketplace/community-operators-wgck2" Nov 24 14:23:49 crc kubenswrapper[5039]: I1124 14:23:49.387341 5039 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcddw\" (UniqueName: \"kubernetes.io/projected/fb470409-995e-47fd-97cd-8a38152a328c-kube-api-access-xcddw\") pod \"community-operators-wgck2\" (UID: \"fb470409-995e-47fd-97cd-8a38152a328c\") " pod="openshift-marketplace/community-operators-wgck2" Nov 24 14:23:49 crc kubenswrapper[5039]: I1124 14:23:49.489361 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wgck2" Nov 24 14:23:50 crc kubenswrapper[5039]: I1124 14:23:50.026142 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wgck2"] Nov 24 14:23:50 crc kubenswrapper[5039]: I1124 14:23:50.720952 5039 generic.go:334] "Generic (PLEG): container finished" podID="fb470409-995e-47fd-97cd-8a38152a328c" containerID="6f5bd5dc4587cebfed23628e9715058d7abbcf89e502657d198160d43b44c234" exitCode=0 Nov 24 14:23:50 crc kubenswrapper[5039]: I1124 14:23:50.721057 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wgck2" event={"ID":"fb470409-995e-47fd-97cd-8a38152a328c","Type":"ContainerDied","Data":"6f5bd5dc4587cebfed23628e9715058d7abbcf89e502657d198160d43b44c234"} Nov 24 14:23:50 crc kubenswrapper[5039]: I1124 14:23:50.721232 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wgck2" event={"ID":"fb470409-995e-47fd-97cd-8a38152a328c","Type":"ContainerStarted","Data":"eb736226eea852de5007ad1aef7d030c01319dcadc6f585a5bd5b2e091a9f487"} Nov 24 14:23:51 crc kubenswrapper[5039]: I1124 14:23:51.731952 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wgck2" event={"ID":"fb470409-995e-47fd-97cd-8a38152a328c","Type":"ContainerStarted","Data":"cca9dcd10c6638460a525e764c76246333aa8b9da003ab29ec07cf049a1c0646"} Nov 24 14:23:52 crc kubenswrapper[5039]: I1124 14:23:52.742206 5039 generic.go:334] "Generic (PLEG): container finished" podID="fb470409-995e-47fd-97cd-8a38152a328c" containerID="cca9dcd10c6638460a525e764c76246333aa8b9da003ab29ec07cf049a1c0646" exitCode=0 Nov 24 14:23:52 crc kubenswrapper[5039]: I1124 14:23:52.742312 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wgck2" event={"ID":"fb470409-995e-47fd-97cd-8a38152a328c","Type":"ContainerDied","Data":"cca9dcd10c6638460a525e764c76246333aa8b9da003ab29ec07cf049a1c0646"} Nov 24 14:23:53 crc kubenswrapper[5039]: I1124 14:23:53.753750 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wgck2" event={"ID":"fb470409-995e-47fd-97cd-8a38152a328c","Type":"ContainerStarted","Data":"60b9e3aeb3c8153da5d969add76418e0b022f0c089bd9c36ebff7071df16d105"} Nov 24 14:23:53 crc kubenswrapper[5039]: I1124 14:23:53.778472 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wgck2" podStartSLOduration=2.318969687 podStartE2EDuration="4.77845304s" podCreationTimestamp="2025-11-24 14:23:49 +0000 UTC" firstStartedPulling="2025-11-24 14:23:50.723195732 +0000 UTC m=+3943.162320252" lastFinishedPulling="2025-11-24 14:23:53.182679105 +0000 UTC m=+3945.621803605" observedRunningTime="2025-11-24 14:23:53.771683284 +0000 UTC m=+3946.210807794" watchObservedRunningTime="2025-11-24 14:23:53.77845304 +0000 UTC m=+3946.217577540" Nov 24 14:23:59 crc kubenswrapper[5039]: I1124 14:23:59.489763 5039 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wgck2" Nov 24 14:23:59 crc kubenswrapper[5039]: I1124 14:23:59.490421 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wgck2" Nov 24 14:23:59 crc kubenswrapper[5039]: I1124 14:23:59.536488 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wgck2" Nov 24 14:23:59 crc kubenswrapper[5039]: I1124 14:23:59.882479 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wgck2" Nov 24 14:23:59 crc kubenswrapper[5039]: I1124 14:23:59.927183 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wgck2"] Nov 24 14:24:01 crc kubenswrapper[5039]: I1124 14:24:01.847969 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wgck2" podUID="fb470409-995e-47fd-97cd-8a38152a328c" containerName="registry-server" containerID="cri-o://60b9e3aeb3c8153da5d969add76418e0b022f0c089bd9c36ebff7071df16d105" gracePeriod=2 Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.354823 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wgck2" Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.543046 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb470409-995e-47fd-97cd-8a38152a328c-catalog-content\") pod \"fb470409-995e-47fd-97cd-8a38152a328c\" (UID: \"fb470409-995e-47fd-97cd-8a38152a328c\") " Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.543259 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb470409-995e-47fd-97cd-8a38152a328c-utilities\") pod \"fb470409-995e-47fd-97cd-8a38152a328c\" (UID: \"fb470409-995e-47fd-97cd-8a38152a328c\") " Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.543330 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcddw\" (UniqueName: \"kubernetes.io/projected/fb470409-995e-47fd-97cd-8a38152a328c-kube-api-access-xcddw\") pod \"fb470409-995e-47fd-97cd-8a38152a328c\" (UID: \"fb470409-995e-47fd-97cd-8a38152a328c\") " Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.544146 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb470409-995e-47fd-97cd-8a38152a328c-utilities" (OuterVolumeSpecName: "utilities") pod "fb470409-995e-47fd-97cd-8a38152a328c" (UID: "fb470409-995e-47fd-97cd-8a38152a328c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.549296 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb470409-995e-47fd-97cd-8a38152a328c-kube-api-access-xcddw" (OuterVolumeSpecName: "kube-api-access-xcddw") pod "fb470409-995e-47fd-97cd-8a38152a328c" (UID: "fb470409-995e-47fd-97cd-8a38152a328c"). InnerVolumeSpecName "kube-api-access-xcddw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.612514 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb470409-995e-47fd-97cd-8a38152a328c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fb470409-995e-47fd-97cd-8a38152a328c" (UID: "fb470409-995e-47fd-97cd-8a38152a328c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.645939 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb470409-995e-47fd-97cd-8a38152a328c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.645997 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb470409-995e-47fd-97cd-8a38152a328c-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.646011 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcddw\" (UniqueName: \"kubernetes.io/projected/fb470409-995e-47fd-97cd-8a38152a328c-kube-api-access-xcddw\") on node \"crc\" DevicePath \"\"" Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.861860 5039 generic.go:334] "Generic (PLEG): container finished" podID="fb470409-995e-47fd-97cd-8a38152a328c" containerID="60b9e3aeb3c8153da5d969add76418e0b022f0c089bd9c36ebff7071df16d105" exitCode=0 Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.861908 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wgck2" event={"ID":"fb470409-995e-47fd-97cd-8a38152a328c","Type":"ContainerDied","Data":"60b9e3aeb3c8153da5d969add76418e0b022f0c089bd9c36ebff7071df16d105"} Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.861934 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wgck2" Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.861956 5039 scope.go:117] "RemoveContainer" containerID="60b9e3aeb3c8153da5d969add76418e0b022f0c089bd9c36ebff7071df16d105" Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.861944 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wgck2" event={"ID":"fb470409-995e-47fd-97cd-8a38152a328c","Type":"ContainerDied","Data":"eb736226eea852de5007ad1aef7d030c01319dcadc6f585a5bd5b2e091a9f487"} Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.902073 5039 scope.go:117] "RemoveContainer" containerID="cca9dcd10c6638460a525e764c76246333aa8b9da003ab29ec07cf049a1c0646" Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.914948 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wgck2"] Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.923527 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wgck2"] Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.932230 5039 scope.go:117] "RemoveContainer" containerID="6f5bd5dc4587cebfed23628e9715058d7abbcf89e502657d198160d43b44c234" Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.987436 5039 scope.go:117] "RemoveContainer" containerID="60b9e3aeb3c8153da5d969add76418e0b022f0c089bd9c36ebff7071df16d105" Nov 24 14:24:02 crc kubenswrapper[5039]: E1124 14:24:02.987854 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60b9e3aeb3c8153da5d969add76418e0b022f0c089bd9c36ebff7071df16d105\": container with ID starting with 60b9e3aeb3c8153da5d969add76418e0b022f0c089bd9c36ebff7071df16d105 not found: ID does not exist" containerID="60b9e3aeb3c8153da5d969add76418e0b022f0c089bd9c36ebff7071df16d105" Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.987900 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60b9e3aeb3c8153da5d969add76418e0b022f0c089bd9c36ebff7071df16d105"} err="failed to get container status \"60b9e3aeb3c8153da5d969add76418e0b022f0c089bd9c36ebff7071df16d105\": rpc error: code = NotFound desc = could not find container \"60b9e3aeb3c8153da5d969add76418e0b022f0c089bd9c36ebff7071df16d105\": container with ID starting with 60b9e3aeb3c8153da5d969add76418e0b022f0c089bd9c36ebff7071df16d105 not found: ID does not exist" Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.987925 5039 scope.go:117] "RemoveContainer" containerID="cca9dcd10c6638460a525e764c76246333aa8b9da003ab29ec07cf049a1c0646" Nov 24 14:24:02 crc kubenswrapper[5039]: E1124 14:24:02.988218 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cca9dcd10c6638460a525e764c76246333aa8b9da003ab29ec07cf049a1c0646\": container with ID starting with cca9dcd10c6638460a525e764c76246333aa8b9da003ab29ec07cf049a1c0646 not found: ID does not exist" containerID="cca9dcd10c6638460a525e764c76246333aa8b9da003ab29ec07cf049a1c0646" Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.988256 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cca9dcd10c6638460a525e764c76246333aa8b9da003ab29ec07cf049a1c0646"} err="failed to get container status \"cca9dcd10c6638460a525e764c76246333aa8b9da003ab29ec07cf049a1c0646\": rpc error: code = NotFound desc = could not find 
container \"cca9dcd10c6638460a525e764c76246333aa8b9da003ab29ec07cf049a1c0646\": container with ID starting with cca9dcd10c6638460a525e764c76246333aa8b9da003ab29ec07cf049a1c0646 not found: ID does not exist" Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.988307 5039 scope.go:117] "RemoveContainer" containerID="6f5bd5dc4587cebfed23628e9715058d7abbcf89e502657d198160d43b44c234" Nov 24 14:24:02 crc kubenswrapper[5039]: E1124 14:24:02.988588 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f5bd5dc4587cebfed23628e9715058d7abbcf89e502657d198160d43b44c234\": container with ID starting with 6f5bd5dc4587cebfed23628e9715058d7abbcf89e502657d198160d43b44c234 not found: ID does not exist" containerID="6f5bd5dc4587cebfed23628e9715058d7abbcf89e502657d198160d43b44c234" Nov 24 14:24:02 crc kubenswrapper[5039]: I1124 14:24:02.988628 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f5bd5dc4587cebfed23628e9715058d7abbcf89e502657d198160d43b44c234"} err="failed to get container status \"6f5bd5dc4587cebfed23628e9715058d7abbcf89e502657d198160d43b44c234\": rpc error: code = NotFound desc = could not find container \"6f5bd5dc4587cebfed23628e9715058d7abbcf89e502657d198160d43b44c234\": container with ID starting with 6f5bd5dc4587cebfed23628e9715058d7abbcf89e502657d198160d43b44c234 not found: ID does not exist" Nov 24 14:24:04 crc kubenswrapper[5039]: I1124 14:24:04.321169 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb470409-995e-47fd-97cd-8a38152a328c" path="/var/lib/kubelet/pods/fb470409-995e-47fd-97cd-8a38152a328c/volumes" Nov 24 14:25:00 crc kubenswrapper[5039]: I1124 14:25:00.537727 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-5b66587b55-thzjl" podUID="bd1bf6a5-309b-4960-8f37-34b006db3599" containerName="proxy-server" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 24 14:25:13 crc kubenswrapper[5039]: I1124 14:25:13.772223 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xlb2j"] Nov 24 14:25:13 crc kubenswrapper[5039]: E1124 14:25:13.773642 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb470409-995e-47fd-97cd-8a38152a328c" containerName="extract-content" Nov 24 14:25:13 crc kubenswrapper[5039]: I1124 14:25:13.773666 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb470409-995e-47fd-97cd-8a38152a328c" containerName="extract-content" Nov 24 14:25:13 crc kubenswrapper[5039]: E1124 14:25:13.773712 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb470409-995e-47fd-97cd-8a38152a328c" containerName="extract-utilities" Nov 24 14:25:13 crc kubenswrapper[5039]: I1124 14:25:13.773725 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb470409-995e-47fd-97cd-8a38152a328c" containerName="extract-utilities" Nov 24 14:25:13 crc kubenswrapper[5039]: E1124 14:25:13.773789 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb470409-995e-47fd-97cd-8a38152a328c" containerName="registry-server" Nov 24 14:25:13 crc kubenswrapper[5039]: I1124 14:25:13.773803 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb470409-995e-47fd-97cd-8a38152a328c" containerName="registry-server" Nov 24 14:25:13 crc kubenswrapper[5039]: I1124 14:25:13.774164 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb470409-995e-47fd-97cd-8a38152a328c" 
containerName="registry-server" Nov 24 14:25:13 crc kubenswrapper[5039]: I1124 14:25:13.776998 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xlb2j" Nov 24 14:25:13 crc kubenswrapper[5039]: I1124 14:25:13.795878 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xlb2j"] Nov 24 14:25:13 crc kubenswrapper[5039]: I1124 14:25:13.815901 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d96a1bfa-fe91-49ab-bc2f-eecd06b72db0-utilities\") pod \"redhat-marketplace-xlb2j\" (UID: \"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0\") " pod="openshift-marketplace/redhat-marketplace-xlb2j" Nov 24 14:25:13 crc kubenswrapper[5039]: I1124 14:25:13.816050 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d96a1bfa-fe91-49ab-bc2f-eecd06b72db0-catalog-content\") pod \"redhat-marketplace-xlb2j\" (UID: \"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0\") " pod="openshift-marketplace/redhat-marketplace-xlb2j" Nov 24 14:25:13 crc kubenswrapper[5039]: I1124 14:25:13.816209 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjxml\" (UniqueName: \"kubernetes.io/projected/d96a1bfa-fe91-49ab-bc2f-eecd06b72db0-kube-api-access-cjxml\") pod \"redhat-marketplace-xlb2j\" (UID: \"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0\") " pod="openshift-marketplace/redhat-marketplace-xlb2j" Nov 24 14:25:13 crc kubenswrapper[5039]: I1124 14:25:13.917765 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d96a1bfa-fe91-49ab-bc2f-eecd06b72db0-utilities\") pod \"redhat-marketplace-xlb2j\" (UID: \"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0\") " pod="openshift-marketplace/redhat-marketplace-xlb2j" Nov 24 14:25:13 crc kubenswrapper[5039]: I1124 14:25:13.917891 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d96a1bfa-fe91-49ab-bc2f-eecd06b72db0-catalog-content\") pod \"redhat-marketplace-xlb2j\" (UID: \"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0\") " pod="openshift-marketplace/redhat-marketplace-xlb2j" Nov 24 14:25:13 crc kubenswrapper[5039]: I1124 14:25:13.917993 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjxml\" (UniqueName: \"kubernetes.io/projected/d96a1bfa-fe91-49ab-bc2f-eecd06b72db0-kube-api-access-cjxml\") pod \"redhat-marketplace-xlb2j\" (UID: \"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0\") " pod="openshift-marketplace/redhat-marketplace-xlb2j" Nov 24 14:25:13 crc kubenswrapper[5039]: I1124 14:25:13.918299 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d96a1bfa-fe91-49ab-bc2f-eecd06b72db0-utilities\") pod \"redhat-marketplace-xlb2j\" (UID: \"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0\") " pod="openshift-marketplace/redhat-marketplace-xlb2j" Nov 24 14:25:13 crc kubenswrapper[5039]: I1124 14:25:13.918343 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d96a1bfa-fe91-49ab-bc2f-eecd06b72db0-catalog-content\") pod \"redhat-marketplace-xlb2j\" (UID: \"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0\") " 
pod="openshift-marketplace/redhat-marketplace-xlb2j" Nov 24 14:25:13 crc kubenswrapper[5039]: I1124 14:25:13.937388 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjxml\" (UniqueName: \"kubernetes.io/projected/d96a1bfa-fe91-49ab-bc2f-eecd06b72db0-kube-api-access-cjxml\") pod \"redhat-marketplace-xlb2j\" (UID: \"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0\") " pod="openshift-marketplace/redhat-marketplace-xlb2j" Nov 24 14:25:14 crc kubenswrapper[5039]: I1124 14:25:14.114136 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xlb2j" Nov 24 14:25:14 crc kubenswrapper[5039]: I1124 14:25:14.644220 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xlb2j"] Nov 24 14:25:15 crc kubenswrapper[5039]: I1124 14:25:15.661686 5039 generic.go:334] "Generic (PLEG): container finished" podID="d96a1bfa-fe91-49ab-bc2f-eecd06b72db0" containerID="b2608ebb60c9cbec8ea6cf889985a4c0ae126c3481771961f106fb78fbed66a9" exitCode=0 Nov 24 14:25:15 crc kubenswrapper[5039]: I1124 14:25:15.661752 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xlb2j" event={"ID":"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0","Type":"ContainerDied","Data":"b2608ebb60c9cbec8ea6cf889985a4c0ae126c3481771961f106fb78fbed66a9"} Nov 24 14:25:15 crc kubenswrapper[5039]: I1124 14:25:15.662053 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xlb2j" event={"ID":"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0","Type":"ContainerStarted","Data":"611449bb3a8ab6320c918c76b5e6bbcb637e23ec1093147431dddc42de0bae54"} Nov 24 14:25:15 crc kubenswrapper[5039]: I1124 14:25:15.664484 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 14:25:16 crc kubenswrapper[5039]: I1124 14:25:16.675574 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xlb2j" event={"ID":"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0","Type":"ContainerStarted","Data":"be75d3d2dd25b6e63456d775ebbf6a0cdeebe938f1919431c6ec509ebd126f3b"} Nov 24 14:25:17 crc kubenswrapper[5039]: I1124 14:25:17.689161 5039 generic.go:334] "Generic (PLEG): container finished" podID="d96a1bfa-fe91-49ab-bc2f-eecd06b72db0" containerID="be75d3d2dd25b6e63456d775ebbf6a0cdeebe938f1919431c6ec509ebd126f3b" exitCode=0 Nov 24 14:25:17 crc kubenswrapper[5039]: I1124 14:25:17.689327 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xlb2j" event={"ID":"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0","Type":"ContainerDied","Data":"be75d3d2dd25b6e63456d775ebbf6a0cdeebe938f1919431c6ec509ebd126f3b"} Nov 24 14:25:18 crc kubenswrapper[5039]: I1124 14:25:18.707801 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xlb2j" event={"ID":"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0","Type":"ContainerStarted","Data":"0f6529f9823a77f7b5539df09b83c69a9c25a851dd018843d7ceec64991e882a"} Nov 24 14:25:18 crc kubenswrapper[5039]: I1124 14:25:18.733794 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xlb2j" podStartSLOduration=3.167822322 podStartE2EDuration="5.733771778s" podCreationTimestamp="2025-11-24 14:25:13 +0000 UTC" firstStartedPulling="2025-11-24 14:25:15.664250652 +0000 UTC m=+4028.103375152" lastFinishedPulling="2025-11-24 14:25:18.230200068 +0000 UTC 
m=+4030.669324608" observedRunningTime="2025-11-24 14:25:18.729891453 +0000 UTC m=+4031.169015993" watchObservedRunningTime="2025-11-24 14:25:18.733771778 +0000 UTC m=+4031.172896288" Nov 24 14:25:20 crc kubenswrapper[5039]: I1124 14:25:20.102332 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:25:20 crc kubenswrapper[5039]: I1124 14:25:20.102824 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 14:25:24 crc kubenswrapper[5039]: I1124 14:25:24.114298 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xlb2j" Nov 24 14:25:24 crc kubenswrapper[5039]: I1124 14:25:24.115405 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xlb2j" Nov 24 14:25:24 crc kubenswrapper[5039]: I1124 14:25:24.165081 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xlb2j" Nov 24 14:25:24 crc kubenswrapper[5039]: I1124 14:25:24.853058 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xlb2j" Nov 24 14:25:24 crc kubenswrapper[5039]: I1124 14:25:24.917895 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xlb2j"] Nov 24 14:25:26 crc kubenswrapper[5039]: I1124 14:25:26.809131 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xlb2j" podUID="d96a1bfa-fe91-49ab-bc2f-eecd06b72db0" containerName="registry-server" containerID="cri-o://0f6529f9823a77f7b5539df09b83c69a9c25a851dd018843d7ceec64991e882a" gracePeriod=2 Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.331120 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xlb2j" Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.434907 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cjxml\" (UniqueName: \"kubernetes.io/projected/d96a1bfa-fe91-49ab-bc2f-eecd06b72db0-kube-api-access-cjxml\") pod \"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0\" (UID: \"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0\") " Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.435057 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d96a1bfa-fe91-49ab-bc2f-eecd06b72db0-utilities\") pod \"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0\" (UID: \"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0\") " Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.435094 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d96a1bfa-fe91-49ab-bc2f-eecd06b72db0-catalog-content\") pod \"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0\" (UID: \"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0\") " Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.436514 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d96a1bfa-fe91-49ab-bc2f-eecd06b72db0-utilities" (OuterVolumeSpecName: "utilities") pod "d96a1bfa-fe91-49ab-bc2f-eecd06b72db0" (UID: "d96a1bfa-fe91-49ab-bc2f-eecd06b72db0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.442246 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d96a1bfa-fe91-49ab-bc2f-eecd06b72db0-kube-api-access-cjxml" (OuterVolumeSpecName: "kube-api-access-cjxml") pod "d96a1bfa-fe91-49ab-bc2f-eecd06b72db0" (UID: "d96a1bfa-fe91-49ab-bc2f-eecd06b72db0"). InnerVolumeSpecName "kube-api-access-cjxml". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.457048 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d96a1bfa-fe91-49ab-bc2f-eecd06b72db0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d96a1bfa-fe91-49ab-bc2f-eecd06b72db0" (UID: "d96a1bfa-fe91-49ab-bc2f-eecd06b72db0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.537781 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cjxml\" (UniqueName: \"kubernetes.io/projected/d96a1bfa-fe91-49ab-bc2f-eecd06b72db0-kube-api-access-cjxml\") on node \"crc\" DevicePath \"\"" Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.537817 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d96a1bfa-fe91-49ab-bc2f-eecd06b72db0-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.537828 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d96a1bfa-fe91-49ab-bc2f-eecd06b72db0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.824638 5039 generic.go:334] "Generic (PLEG): container finished" podID="d96a1bfa-fe91-49ab-bc2f-eecd06b72db0" containerID="0f6529f9823a77f7b5539df09b83c69a9c25a851dd018843d7ceec64991e882a" exitCode=0 Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.824686 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xlb2j" event={"ID":"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0","Type":"ContainerDied","Data":"0f6529f9823a77f7b5539df09b83c69a9c25a851dd018843d7ceec64991e882a"} Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.824726 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xlb2j" event={"ID":"d96a1bfa-fe91-49ab-bc2f-eecd06b72db0","Type":"ContainerDied","Data":"611449bb3a8ab6320c918c76b5e6bbcb637e23ec1093147431dddc42de0bae54"} Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.824749 5039 scope.go:117] "RemoveContainer" containerID="0f6529f9823a77f7b5539df09b83c69a9c25a851dd018843d7ceec64991e882a" Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.824787 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xlb2j" Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.849176 5039 scope.go:117] "RemoveContainer" containerID="be75d3d2dd25b6e63456d775ebbf6a0cdeebe938f1919431c6ec509ebd126f3b" Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.869214 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xlb2j"] Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.879612 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xlb2j"] Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.890010 5039 scope.go:117] "RemoveContainer" containerID="b2608ebb60c9cbec8ea6cf889985a4c0ae126c3481771961f106fb78fbed66a9" Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.926826 5039 scope.go:117] "RemoveContainer" containerID="0f6529f9823a77f7b5539df09b83c69a9c25a851dd018843d7ceec64991e882a" Nov 24 14:25:27 crc kubenswrapper[5039]: E1124 14:25:27.929958 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f6529f9823a77f7b5539df09b83c69a9c25a851dd018843d7ceec64991e882a\": container with ID starting with 0f6529f9823a77f7b5539df09b83c69a9c25a851dd018843d7ceec64991e882a not found: ID does not exist" containerID="0f6529f9823a77f7b5539df09b83c69a9c25a851dd018843d7ceec64991e882a" Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.929990 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f6529f9823a77f7b5539df09b83c69a9c25a851dd018843d7ceec64991e882a"} err="failed to get container status \"0f6529f9823a77f7b5539df09b83c69a9c25a851dd018843d7ceec64991e882a\": rpc error: code = NotFound desc = could not find container \"0f6529f9823a77f7b5539df09b83c69a9c25a851dd018843d7ceec64991e882a\": container with ID starting with 0f6529f9823a77f7b5539df09b83c69a9c25a851dd018843d7ceec64991e882a not found: ID does not exist" Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.930012 5039 scope.go:117] "RemoveContainer" containerID="be75d3d2dd25b6e63456d775ebbf6a0cdeebe938f1919431c6ec509ebd126f3b" Nov 24 14:25:27 crc kubenswrapper[5039]: E1124 14:25:27.933053 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be75d3d2dd25b6e63456d775ebbf6a0cdeebe938f1919431c6ec509ebd126f3b\": container with ID starting with be75d3d2dd25b6e63456d775ebbf6a0cdeebe938f1919431c6ec509ebd126f3b not found: ID does not exist" containerID="be75d3d2dd25b6e63456d775ebbf6a0cdeebe938f1919431c6ec509ebd126f3b" Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.933097 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be75d3d2dd25b6e63456d775ebbf6a0cdeebe938f1919431c6ec509ebd126f3b"} err="failed to get container status \"be75d3d2dd25b6e63456d775ebbf6a0cdeebe938f1919431c6ec509ebd126f3b\": rpc error: code = NotFound desc = could not find container \"be75d3d2dd25b6e63456d775ebbf6a0cdeebe938f1919431c6ec509ebd126f3b\": container with ID starting with be75d3d2dd25b6e63456d775ebbf6a0cdeebe938f1919431c6ec509ebd126f3b not found: ID does not exist" Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.933115 5039 scope.go:117] "RemoveContainer" containerID="b2608ebb60c9cbec8ea6cf889985a4c0ae126c3481771961f106fb78fbed66a9" Nov 24 14:25:27 crc kubenswrapper[5039]: E1124 14:25:27.933341 5039 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"b2608ebb60c9cbec8ea6cf889985a4c0ae126c3481771961f106fb78fbed66a9\": container with ID starting with b2608ebb60c9cbec8ea6cf889985a4c0ae126c3481771961f106fb78fbed66a9 not found: ID does not exist" containerID="b2608ebb60c9cbec8ea6cf889985a4c0ae126c3481771961f106fb78fbed66a9" Nov 24 14:25:27 crc kubenswrapper[5039]: I1124 14:25:27.933365 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2608ebb60c9cbec8ea6cf889985a4c0ae126c3481771961f106fb78fbed66a9"} err="failed to get container status \"b2608ebb60c9cbec8ea6cf889985a4c0ae126c3481771961f106fb78fbed66a9\": rpc error: code = NotFound desc = could not find container \"b2608ebb60c9cbec8ea6cf889985a4c0ae126c3481771961f106fb78fbed66a9\": container with ID starting with b2608ebb60c9cbec8ea6cf889985a4c0ae126c3481771961f106fb78fbed66a9 not found: ID does not exist" Nov 24 14:25:28 crc kubenswrapper[5039]: I1124 14:25:28.321750 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d96a1bfa-fe91-49ab-bc2f-eecd06b72db0" path="/var/lib/kubelet/pods/d96a1bfa-fe91-49ab-bc2f-eecd06b72db0/volumes" Nov 24 14:25:50 crc kubenswrapper[5039]: I1124 14:25:50.102082 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:25:50 crc kubenswrapper[5039]: I1124 14:25:50.102981 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 14:26:20 crc kubenswrapper[5039]: I1124 14:26:20.101705 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:26:20 crc kubenswrapper[5039]: I1124 14:26:20.102314 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 14:26:20 crc kubenswrapper[5039]: I1124 14:26:20.102366 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 14:26:20 crc kubenswrapper[5039]: I1124 14:26:20.103286 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a44b192acf46995fecc0c4d4f9cc70c29cd209cc1b45f88a225e15065c976530"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 14:26:20 crc kubenswrapper[5039]: I1124 14:26:20.103356 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" 
podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://a44b192acf46995fecc0c4d4f9cc70c29cd209cc1b45f88a225e15065c976530" gracePeriod=600 Nov 24 14:26:20 crc kubenswrapper[5039]: I1124 14:26:20.386773 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="a44b192acf46995fecc0c4d4f9cc70c29cd209cc1b45f88a225e15065c976530" exitCode=0 Nov 24 14:26:20 crc kubenswrapper[5039]: I1124 14:26:20.386869 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"a44b192acf46995fecc0c4d4f9cc70c29cd209cc1b45f88a225e15065c976530"} Nov 24 14:26:20 crc kubenswrapper[5039]: I1124 14:26:20.387329 5039 scope.go:117] "RemoveContainer" containerID="85b1016c004ae353959b825a33183a4d593cf8ea611ea33d857725bc68fdf52a" Nov 24 14:26:21 crc kubenswrapper[5039]: I1124 14:26:21.403228 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40"} Nov 24 14:26:29 crc kubenswrapper[5039]: I1124 14:26:29.901362 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8gzqg"] Nov 24 14:26:29 crc kubenswrapper[5039]: E1124 14:26:29.918976 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d96a1bfa-fe91-49ab-bc2f-eecd06b72db0" containerName="registry-server" Nov 24 14:26:29 crc kubenswrapper[5039]: I1124 14:26:29.920496 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="d96a1bfa-fe91-49ab-bc2f-eecd06b72db0" containerName="registry-server" Nov 24 14:26:29 crc kubenswrapper[5039]: E1124 14:26:29.920589 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d96a1bfa-fe91-49ab-bc2f-eecd06b72db0" containerName="extract-utilities" Nov 24 14:26:29 crc kubenswrapper[5039]: I1124 14:26:29.920600 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="d96a1bfa-fe91-49ab-bc2f-eecd06b72db0" containerName="extract-utilities" Nov 24 14:26:29 crc kubenswrapper[5039]: E1124 14:26:29.920643 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d96a1bfa-fe91-49ab-bc2f-eecd06b72db0" containerName="extract-content" Nov 24 14:26:29 crc kubenswrapper[5039]: I1124 14:26:29.920652 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="d96a1bfa-fe91-49ab-bc2f-eecd06b72db0" containerName="extract-content" Nov 24 14:26:29 crc kubenswrapper[5039]: I1124 14:26:29.921304 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="d96a1bfa-fe91-49ab-bc2f-eecd06b72db0" containerName="registry-server" Nov 24 14:26:29 crc kubenswrapper[5039]: I1124 14:26:29.931108 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8gzqg"] Nov 24 14:26:29 crc kubenswrapper[5039]: I1124 14:26:29.931258 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8gzqg" Nov 24 14:26:30 crc kubenswrapper[5039]: I1124 14:26:30.112369 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eda0ee9b-8020-45b8-ac42-d063362d4bf1-catalog-content\") pod \"certified-operators-8gzqg\" (UID: \"eda0ee9b-8020-45b8-ac42-d063362d4bf1\") " pod="openshift-marketplace/certified-operators-8gzqg" Nov 24 14:26:30 crc kubenswrapper[5039]: I1124 14:26:30.112706 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gctnn\" (UniqueName: \"kubernetes.io/projected/eda0ee9b-8020-45b8-ac42-d063362d4bf1-kube-api-access-gctnn\") pod \"certified-operators-8gzqg\" (UID: \"eda0ee9b-8020-45b8-ac42-d063362d4bf1\") " pod="openshift-marketplace/certified-operators-8gzqg" Nov 24 14:26:30 crc kubenswrapper[5039]: I1124 14:26:30.113079 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eda0ee9b-8020-45b8-ac42-d063362d4bf1-utilities\") pod \"certified-operators-8gzqg\" (UID: \"eda0ee9b-8020-45b8-ac42-d063362d4bf1\") " pod="openshift-marketplace/certified-operators-8gzqg" Nov 24 14:26:30 crc kubenswrapper[5039]: I1124 14:26:30.215334 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eda0ee9b-8020-45b8-ac42-d063362d4bf1-utilities\") pod \"certified-operators-8gzqg\" (UID: \"eda0ee9b-8020-45b8-ac42-d063362d4bf1\") " pod="openshift-marketplace/certified-operators-8gzqg" Nov 24 14:26:30 crc kubenswrapper[5039]: I1124 14:26:30.215408 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eda0ee9b-8020-45b8-ac42-d063362d4bf1-catalog-content\") pod \"certified-operators-8gzqg\" (UID: \"eda0ee9b-8020-45b8-ac42-d063362d4bf1\") " pod="openshift-marketplace/certified-operators-8gzqg" Nov 24 14:26:30 crc kubenswrapper[5039]: I1124 14:26:30.215585 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gctnn\" (UniqueName: \"kubernetes.io/projected/eda0ee9b-8020-45b8-ac42-d063362d4bf1-kube-api-access-gctnn\") pod \"certified-operators-8gzqg\" (UID: \"eda0ee9b-8020-45b8-ac42-d063362d4bf1\") " pod="openshift-marketplace/certified-operators-8gzqg" Nov 24 14:26:30 crc kubenswrapper[5039]: I1124 14:26:30.216061 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eda0ee9b-8020-45b8-ac42-d063362d4bf1-utilities\") pod \"certified-operators-8gzqg\" (UID: \"eda0ee9b-8020-45b8-ac42-d063362d4bf1\") " pod="openshift-marketplace/certified-operators-8gzqg" Nov 24 14:26:30 crc kubenswrapper[5039]: I1124 14:26:30.216066 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eda0ee9b-8020-45b8-ac42-d063362d4bf1-catalog-content\") pod \"certified-operators-8gzqg\" (UID: \"eda0ee9b-8020-45b8-ac42-d063362d4bf1\") " pod="openshift-marketplace/certified-operators-8gzqg" Nov 24 14:26:30 crc kubenswrapper[5039]: I1124 14:26:30.237027 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gctnn\" (UniqueName: \"kubernetes.io/projected/eda0ee9b-8020-45b8-ac42-d063362d4bf1-kube-api-access-gctnn\") pod 
\"certified-operators-8gzqg\" (UID: \"eda0ee9b-8020-45b8-ac42-d063362d4bf1\") " pod="openshift-marketplace/certified-operators-8gzqg" Nov 24 14:26:30 crc kubenswrapper[5039]: I1124 14:26:30.258866 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8gzqg" Nov 24 14:26:30 crc kubenswrapper[5039]: I1124 14:26:30.793149 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8gzqg"] Nov 24 14:26:31 crc kubenswrapper[5039]: I1124 14:26:31.530703 5039 generic.go:334] "Generic (PLEG): container finished" podID="eda0ee9b-8020-45b8-ac42-d063362d4bf1" containerID="f34bb15791ca824fc2d9112608182b11e2ff3a595366cb25a38ddc08ce54f97b" exitCode=0 Nov 24 14:26:31 crc kubenswrapper[5039]: I1124 14:26:31.530778 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8gzqg" event={"ID":"eda0ee9b-8020-45b8-ac42-d063362d4bf1","Type":"ContainerDied","Data":"f34bb15791ca824fc2d9112608182b11e2ff3a595366cb25a38ddc08ce54f97b"} Nov 24 14:26:31 crc kubenswrapper[5039]: I1124 14:26:31.532072 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8gzqg" event={"ID":"eda0ee9b-8020-45b8-ac42-d063362d4bf1","Type":"ContainerStarted","Data":"973e62f2484da7d88627a35596dca2a92bbbd3146142c3d83cfb25f9d97eff75"} Nov 24 14:26:32 crc kubenswrapper[5039]: I1124 14:26:32.546269 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8gzqg" event={"ID":"eda0ee9b-8020-45b8-ac42-d063362d4bf1","Type":"ContainerStarted","Data":"02b561c81efb9e1c7e92505750f69b0034df80deb489003e6a2327fcc59ca028"} Nov 24 14:26:33 crc kubenswrapper[5039]: I1124 14:26:33.564282 5039 generic.go:334] "Generic (PLEG): container finished" podID="eda0ee9b-8020-45b8-ac42-d063362d4bf1" containerID="02b561c81efb9e1c7e92505750f69b0034df80deb489003e6a2327fcc59ca028" exitCode=0 Nov 24 14:26:33 crc kubenswrapper[5039]: I1124 14:26:33.564595 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8gzqg" event={"ID":"eda0ee9b-8020-45b8-ac42-d063362d4bf1","Type":"ContainerDied","Data":"02b561c81efb9e1c7e92505750f69b0034df80deb489003e6a2327fcc59ca028"} Nov 24 14:26:34 crc kubenswrapper[5039]: I1124 14:26:34.577117 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8gzqg" event={"ID":"eda0ee9b-8020-45b8-ac42-d063362d4bf1","Type":"ContainerStarted","Data":"9c597e8f7e9f06c5e70da63638eb5067879d00b98e9f21f0649a7d7d47c1d2fe"} Nov 24 14:26:34 crc kubenswrapper[5039]: I1124 14:26:34.602419 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8gzqg" podStartSLOduration=3.130418298 podStartE2EDuration="5.602403867s" podCreationTimestamp="2025-11-24 14:26:29 +0000 UTC" firstStartedPulling="2025-11-24 14:26:31.533300971 +0000 UTC m=+4103.972425471" lastFinishedPulling="2025-11-24 14:26:34.00528654 +0000 UTC m=+4106.444411040" observedRunningTime="2025-11-24 14:26:34.602349526 +0000 UTC m=+4107.041474036" watchObservedRunningTime="2025-11-24 14:26:34.602403867 +0000 UTC m=+4107.041528367" Nov 24 14:26:39 crc kubenswrapper[5039]: I1124 14:26:39.628493 5039 generic.go:334] "Generic (PLEG): container finished" podID="d1d48eba-5a90-4ca3-b298-f19175f93608" containerID="bafe97c8d5d59a9b31a2cd4f7889b9e11c7d8162b6121d4e013bec0dc0d7bd4e" exitCode=0 Nov 24 14:26:39 crc 
kubenswrapper[5039]: I1124 14:26:39.629449 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" event={"ID":"d1d48eba-5a90-4ca3-b298-f19175f93608","Type":"ContainerDied","Data":"bafe97c8d5d59a9b31a2cd4f7889b9e11c7d8162b6121d4e013bec0dc0d7bd4e"} Nov 24 14:26:40 crc kubenswrapper[5039]: I1124 14:26:40.259919 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8gzqg" Nov 24 14:26:40 crc kubenswrapper[5039]: I1124 14:26:40.260855 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8gzqg" Nov 24 14:26:40 crc kubenswrapper[5039]: I1124 14:26:40.326644 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8gzqg" Nov 24 14:26:40 crc kubenswrapper[5039]: I1124 14:26:40.703062 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8gzqg" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.070324 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.254776 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-inventory\") pod \"d1d48eba-5a90-4ca3-b298-f19175f93608\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.254854 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceilometer-compute-config-data-1\") pod \"d1d48eba-5a90-4ca3-b298-f19175f93608\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.254962 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-telemetry-combined-ca-bundle\") pod \"d1d48eba-5a90-4ca3-b298-f19175f93608\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.255021 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceph\") pod \"d1d48eba-5a90-4ca3-b298-f19175f93608\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.255065 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceilometer-compute-config-data-2\") pod \"d1d48eba-5a90-4ca3-b298-f19175f93608\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.255110 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zpj8v\" (UniqueName: \"kubernetes.io/projected/d1d48eba-5a90-4ca3-b298-f19175f93608-kube-api-access-zpj8v\") pod \"d1d48eba-5a90-4ca3-b298-f19175f93608\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.255176 5039 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ssh-key\") pod \"d1d48eba-5a90-4ca3-b298-f19175f93608\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.255212 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceilometer-compute-config-data-0\") pod \"d1d48eba-5a90-4ca3-b298-f19175f93608\" (UID: \"d1d48eba-5a90-4ca3-b298-f19175f93608\") " Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.261674 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceph" (OuterVolumeSpecName: "ceph") pod "d1d48eba-5a90-4ca3-b298-f19175f93608" (UID: "d1d48eba-5a90-4ca3-b298-f19175f93608"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.262779 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1d48eba-5a90-4ca3-b298-f19175f93608-kube-api-access-zpj8v" (OuterVolumeSpecName: "kube-api-access-zpj8v") pod "d1d48eba-5a90-4ca3-b298-f19175f93608" (UID: "d1d48eba-5a90-4ca3-b298-f19175f93608"). InnerVolumeSpecName "kube-api-access-zpj8v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.264621 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "d1d48eba-5a90-4ca3-b298-f19175f93608" (UID: "d1d48eba-5a90-4ca3-b298-f19175f93608"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.292481 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d1d48eba-5a90-4ca3-b298-f19175f93608" (UID: "d1d48eba-5a90-4ca3-b298-f19175f93608"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.295326 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "d1d48eba-5a90-4ca3-b298-f19175f93608" (UID: "d1d48eba-5a90-4ca3-b298-f19175f93608"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.300304 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-inventory" (OuterVolumeSpecName: "inventory") pod "d1d48eba-5a90-4ca3-b298-f19175f93608" (UID: "d1d48eba-5a90-4ca3-b298-f19175f93608"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.301763 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "d1d48eba-5a90-4ca3-b298-f19175f93608" (UID: "d1d48eba-5a90-4ca3-b298-f19175f93608"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.303796 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "d1d48eba-5a90-4ca3-b298-f19175f93608" (UID: "d1d48eba-5a90-4ca3-b298-f19175f93608"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.357900 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceph\") on node \"crc\" DevicePath \"\"" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.357944 5039 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.357955 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zpj8v\" (UniqueName: \"kubernetes.io/projected/d1d48eba-5a90-4ca3-b298-f19175f93608-kube-api-access-zpj8v\") on node \"crc\" DevicePath \"\"" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.357964 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.357973 5039 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.357981 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.357992 5039 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.358002 5039 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1d48eba-5a90-4ca3-b298-f19175f93608-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.649665 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.650700 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2" event={"ID":"d1d48eba-5a90-4ca3-b298-f19175f93608","Type":"ContainerDied","Data":"2bdb613c8bb8f215ffbeb7e8ea5c5badd6415ac2514b1de72fd3dda4316bad0c"} Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.650751 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2bdb613c8bb8f215ffbeb7e8ea5c5badd6415ac2514b1de72fd3dda4316bad0c" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.744076 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5"] Nov 24 14:26:41 crc kubenswrapper[5039]: E1124 14:26:41.744650 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1d48eba-5a90-4ca3-b298-f19175f93608" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.744670 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1d48eba-5a90-4ca3-b298-f19175f93608" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.744950 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1d48eba-5a90-4ca3-b298-f19175f93608" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.746118 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.748696 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-ipmi-config-data" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.749068 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.749231 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.749660 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.750171 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.750594 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.758193 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5"] Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.868720 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 
crc kubenswrapper[5039]: I1124 14:26:41.868815 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ssh-key\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.868879 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.869202 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.869402 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.869444 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcgkv\" (UniqueName: \"kubernetes.io/projected/d8589776-bb1f-42ea-8bfa-7053520c66b7-kube-api-access-jcgkv\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.869593 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceph\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.869681 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.971608 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.971750 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.971807 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.971829 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcgkv\" (UniqueName: \"kubernetes.io/projected/d8589776-bb1f-42ea-8bfa-7053520c66b7-kube-api-access-jcgkv\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.971868 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceph\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.971894 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.971920 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.971946 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ssh-key\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " 
pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.976187 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.976267 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.977492 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ssh-key\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.977939 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceph\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.978006 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.978154 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.979316 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:41 crc kubenswrapper[5039]: I1124 14:26:41.988919 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcgkv\" (UniqueName: 
\"kubernetes.io/projected/d8589776-bb1f-42ea-8bfa-7053520c66b7-kube-api-access-jcgkv\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:42 crc kubenswrapper[5039]: I1124 14:26:42.073587 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:26:42 crc kubenswrapper[5039]: I1124 14:26:42.640500 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5"] Nov 24 14:26:42 crc kubenswrapper[5039]: I1124 14:26:42.664232 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" event={"ID":"d8589776-bb1f-42ea-8bfa-7053520c66b7","Type":"ContainerStarted","Data":"5599be1d38ef10281ae11da5a311212aa7346a9a13156088474f128f39b8befb"} Nov 24 14:26:43 crc kubenswrapper[5039]: I1124 14:26:43.057089 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8gzqg"] Nov 24 14:26:43 crc kubenswrapper[5039]: I1124 14:26:43.673891 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8gzqg" podUID="eda0ee9b-8020-45b8-ac42-d063362d4bf1" containerName="registry-server" containerID="cri-o://9c597e8f7e9f06c5e70da63638eb5067879d00b98e9f21f0649a7d7d47c1d2fe" gracePeriod=2 Nov 24 14:26:44 crc kubenswrapper[5039]: I1124 14:26:44.206301 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8gzqg" Nov 24 14:26:44 crc kubenswrapper[5039]: I1124 14:26:44.320475 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eda0ee9b-8020-45b8-ac42-d063362d4bf1-utilities\") pod \"eda0ee9b-8020-45b8-ac42-d063362d4bf1\" (UID: \"eda0ee9b-8020-45b8-ac42-d063362d4bf1\") " Nov 24 14:26:44 crc kubenswrapper[5039]: I1124 14:26:44.320647 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eda0ee9b-8020-45b8-ac42-d063362d4bf1-catalog-content\") pod \"eda0ee9b-8020-45b8-ac42-d063362d4bf1\" (UID: \"eda0ee9b-8020-45b8-ac42-d063362d4bf1\") " Nov 24 14:26:44 crc kubenswrapper[5039]: I1124 14:26:44.320852 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gctnn\" (UniqueName: \"kubernetes.io/projected/eda0ee9b-8020-45b8-ac42-d063362d4bf1-kube-api-access-gctnn\") pod \"eda0ee9b-8020-45b8-ac42-d063362d4bf1\" (UID: \"eda0ee9b-8020-45b8-ac42-d063362d4bf1\") " Nov 24 14:26:44 crc kubenswrapper[5039]: I1124 14:26:44.322259 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eda0ee9b-8020-45b8-ac42-d063362d4bf1-utilities" (OuterVolumeSpecName: "utilities") pod "eda0ee9b-8020-45b8-ac42-d063362d4bf1" (UID: "eda0ee9b-8020-45b8-ac42-d063362d4bf1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:26:44 crc kubenswrapper[5039]: I1124 14:26:44.326847 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eda0ee9b-8020-45b8-ac42-d063362d4bf1-kube-api-access-gctnn" (OuterVolumeSpecName: "kube-api-access-gctnn") pod "eda0ee9b-8020-45b8-ac42-d063362d4bf1" (UID: "eda0ee9b-8020-45b8-ac42-d063362d4bf1"). InnerVolumeSpecName "kube-api-access-gctnn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:26:44 crc kubenswrapper[5039]: I1124 14:26:44.367737 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eda0ee9b-8020-45b8-ac42-d063362d4bf1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eda0ee9b-8020-45b8-ac42-d063362d4bf1" (UID: "eda0ee9b-8020-45b8-ac42-d063362d4bf1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:26:44 crc kubenswrapper[5039]: I1124 14:26:44.423619 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gctnn\" (UniqueName: \"kubernetes.io/projected/eda0ee9b-8020-45b8-ac42-d063362d4bf1-kube-api-access-gctnn\") on node \"crc\" DevicePath \"\"" Nov 24 14:26:44 crc kubenswrapper[5039]: I1124 14:26:44.423657 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eda0ee9b-8020-45b8-ac42-d063362d4bf1-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 14:26:44 crc kubenswrapper[5039]: I1124 14:26:44.423670 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eda0ee9b-8020-45b8-ac42-d063362d4bf1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 14:26:44 crc kubenswrapper[5039]: I1124 14:26:44.689361 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" event={"ID":"d8589776-bb1f-42ea-8bfa-7053520c66b7","Type":"ContainerStarted","Data":"e837f946f1b412970dd485704539f48d3f48edc433d9e4bd25d72bbfa9a5e0db"} Nov 24 14:26:44 crc kubenswrapper[5039]: I1124 14:26:44.693905 5039 generic.go:334] "Generic (PLEG): container finished" podID="eda0ee9b-8020-45b8-ac42-d063362d4bf1" containerID="9c597e8f7e9f06c5e70da63638eb5067879d00b98e9f21f0649a7d7d47c1d2fe" exitCode=0 Nov 24 14:26:44 crc kubenswrapper[5039]: I1124 14:26:44.693953 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8gzqg" Nov 24 14:26:44 crc kubenswrapper[5039]: I1124 14:26:44.693982 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8gzqg" event={"ID":"eda0ee9b-8020-45b8-ac42-d063362d4bf1","Type":"ContainerDied","Data":"9c597e8f7e9f06c5e70da63638eb5067879d00b98e9f21f0649a7d7d47c1d2fe"} Nov 24 14:26:44 crc kubenswrapper[5039]: I1124 14:26:44.694025 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8gzqg" event={"ID":"eda0ee9b-8020-45b8-ac42-d063362d4bf1","Type":"ContainerDied","Data":"973e62f2484da7d88627a35596dca2a92bbbd3146142c3d83cfb25f9d97eff75"} Nov 24 14:26:44 crc kubenswrapper[5039]: I1124 14:26:44.694045 5039 scope.go:117] "RemoveContainer" containerID="9c597e8f7e9f06c5e70da63638eb5067879d00b98e9f21f0649a7d7d47c1d2fe" Nov 24 14:26:44 crc kubenswrapper[5039]: I1124 14:26:44.715890 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" podStartSLOduration=2.548796667 podStartE2EDuration="3.715872157s" podCreationTimestamp="2025-11-24 14:26:41 +0000 UTC" firstStartedPulling="2025-11-24 14:26:42.642683727 +0000 UTC m=+4115.081808247" lastFinishedPulling="2025-11-24 14:26:43.809759237 +0000 UTC m=+4116.248883737" observedRunningTime="2025-11-24 14:26:44.71396328 +0000 UTC m=+4117.153087860" watchObservedRunningTime="2025-11-24 14:26:44.715872157 +0000 UTC m=+4117.154996657" Nov 24 14:26:44 crc kubenswrapper[5039]: I1124 14:26:44.744637 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8gzqg"] Nov 24 14:26:44 crc kubenswrapper[5039]: I1124 14:26:44.747533 5039 scope.go:117] "RemoveContainer" containerID="02b561c81efb9e1c7e92505750f69b0034df80deb489003e6a2327fcc59ca028" Nov 24 14:26:44 crc kubenswrapper[5039]: I1124 14:26:44.752260 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8gzqg"] Nov 24 14:26:45 crc kubenswrapper[5039]: I1124 14:26:45.508324 5039 scope.go:117] "RemoveContainer" containerID="f34bb15791ca824fc2d9112608182b11e2ff3a595366cb25a38ddc08ce54f97b" Nov 24 14:26:45 crc kubenswrapper[5039]: I1124 14:26:45.703410 5039 scope.go:117] "RemoveContainer" containerID="9c597e8f7e9f06c5e70da63638eb5067879d00b98e9f21f0649a7d7d47c1d2fe" Nov 24 14:26:45 crc kubenswrapper[5039]: E1124 14:26:45.704316 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c597e8f7e9f06c5e70da63638eb5067879d00b98e9f21f0649a7d7d47c1d2fe\": container with ID starting with 9c597e8f7e9f06c5e70da63638eb5067879d00b98e9f21f0649a7d7d47c1d2fe not found: ID does not exist" containerID="9c597e8f7e9f06c5e70da63638eb5067879d00b98e9f21f0649a7d7d47c1d2fe" Nov 24 14:26:45 crc kubenswrapper[5039]: I1124 14:26:45.704369 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c597e8f7e9f06c5e70da63638eb5067879d00b98e9f21f0649a7d7d47c1d2fe"} err="failed to get container status \"9c597e8f7e9f06c5e70da63638eb5067879d00b98e9f21f0649a7d7d47c1d2fe\": rpc error: code = NotFound desc = could not find container \"9c597e8f7e9f06c5e70da63638eb5067879d00b98e9f21f0649a7d7d47c1d2fe\": container with ID starting with 9c597e8f7e9f06c5e70da63638eb5067879d00b98e9f21f0649a7d7d47c1d2fe not found: ID does not exist" Nov 24 14:26:45 crc kubenswrapper[5039]: I1124 
14:26:45.704404 5039 scope.go:117] "RemoveContainer" containerID="02b561c81efb9e1c7e92505750f69b0034df80deb489003e6a2327fcc59ca028" Nov 24 14:26:45 crc kubenswrapper[5039]: E1124 14:26:45.704864 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02b561c81efb9e1c7e92505750f69b0034df80deb489003e6a2327fcc59ca028\": container with ID starting with 02b561c81efb9e1c7e92505750f69b0034df80deb489003e6a2327fcc59ca028 not found: ID does not exist" containerID="02b561c81efb9e1c7e92505750f69b0034df80deb489003e6a2327fcc59ca028" Nov 24 14:26:45 crc kubenswrapper[5039]: I1124 14:26:45.704943 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02b561c81efb9e1c7e92505750f69b0034df80deb489003e6a2327fcc59ca028"} err="failed to get container status \"02b561c81efb9e1c7e92505750f69b0034df80deb489003e6a2327fcc59ca028\": rpc error: code = NotFound desc = could not find container \"02b561c81efb9e1c7e92505750f69b0034df80deb489003e6a2327fcc59ca028\": container with ID starting with 02b561c81efb9e1c7e92505750f69b0034df80deb489003e6a2327fcc59ca028 not found: ID does not exist" Nov 24 14:26:45 crc kubenswrapper[5039]: I1124 14:26:45.705014 5039 scope.go:117] "RemoveContainer" containerID="f34bb15791ca824fc2d9112608182b11e2ff3a595366cb25a38ddc08ce54f97b" Nov 24 14:26:45 crc kubenswrapper[5039]: E1124 14:26:45.705420 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f34bb15791ca824fc2d9112608182b11e2ff3a595366cb25a38ddc08ce54f97b\": container with ID starting with f34bb15791ca824fc2d9112608182b11e2ff3a595366cb25a38ddc08ce54f97b not found: ID does not exist" containerID="f34bb15791ca824fc2d9112608182b11e2ff3a595366cb25a38ddc08ce54f97b" Nov 24 14:26:45 crc kubenswrapper[5039]: I1124 14:26:45.705474 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f34bb15791ca824fc2d9112608182b11e2ff3a595366cb25a38ddc08ce54f97b"} err="failed to get container status \"f34bb15791ca824fc2d9112608182b11e2ff3a595366cb25a38ddc08ce54f97b\": rpc error: code = NotFound desc = could not find container \"f34bb15791ca824fc2d9112608182b11e2ff3a595366cb25a38ddc08ce54f97b\": container with ID starting with f34bb15791ca824fc2d9112608182b11e2ff3a595366cb25a38ddc08ce54f97b not found: ID does not exist" Nov 24 14:26:46 crc kubenswrapper[5039]: I1124 14:26:46.318614 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eda0ee9b-8020-45b8-ac42-d063362d4bf1" path="/var/lib/kubelet/pods/eda0ee9b-8020-45b8-ac42-d063362d4bf1/volumes" Nov 24 14:28:20 crc kubenswrapper[5039]: I1124 14:28:20.101181 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:28:20 crc kubenswrapper[5039]: I1124 14:28:20.101683 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 14:28:50 crc kubenswrapper[5039]: I1124 14:28:50.101880 5039 patch_prober.go:28] interesting 
pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:28:50 crc kubenswrapper[5039]: I1124 14:28:50.102731 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 14:29:03 crc kubenswrapper[5039]: I1124 14:29:03.235156 5039 generic.go:334] "Generic (PLEG): container finished" podID="d8589776-bb1f-42ea-8bfa-7053520c66b7" containerID="e837f946f1b412970dd485704539f48d3f48edc433d9e4bd25d72bbfa9a5e0db" exitCode=0 Nov 24 14:29:03 crc kubenswrapper[5039]: I1124 14:29:03.235352 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" event={"ID":"d8589776-bb1f-42ea-8bfa-7053520c66b7","Type":"ContainerDied","Data":"e837f946f1b412970dd485704539f48d3f48edc433d9e4bd25d72bbfa9a5e0db"} Nov 24 14:29:04 crc kubenswrapper[5039]: I1124 14:29:04.752366 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:29:04 crc kubenswrapper[5039]: I1124 14:29:04.947703 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceilometer-ipmi-config-data-1\") pod \"d8589776-bb1f-42ea-8bfa-7053520c66b7\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " Nov 24 14:29:04 crc kubenswrapper[5039]: I1124 14:29:04.948143 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-telemetry-power-monitoring-combined-ca-bundle\") pod \"d8589776-bb1f-42ea-8bfa-7053520c66b7\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " Nov 24 14:29:04 crc kubenswrapper[5039]: I1124 14:29:04.948195 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jcgkv\" (UniqueName: \"kubernetes.io/projected/d8589776-bb1f-42ea-8bfa-7053520c66b7-kube-api-access-jcgkv\") pod \"d8589776-bb1f-42ea-8bfa-7053520c66b7\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " Nov 24 14:29:04 crc kubenswrapper[5039]: I1124 14:29:04.948262 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceilometer-ipmi-config-data-2\") pod \"d8589776-bb1f-42ea-8bfa-7053520c66b7\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " Nov 24 14:29:04 crc kubenswrapper[5039]: I1124 14:29:04.948291 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ssh-key\") pod \"d8589776-bb1f-42ea-8bfa-7053520c66b7\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " Nov 24 14:29:04 crc kubenswrapper[5039]: I1124 14:29:04.948313 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-inventory\") pod \"d8589776-bb1f-42ea-8bfa-7053520c66b7\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " Nov 24 14:29:04 crc kubenswrapper[5039]: I1124 14:29:04.948352 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceph\") pod \"d8589776-bb1f-42ea-8bfa-7053520c66b7\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " Nov 24 14:29:04 crc kubenswrapper[5039]: I1124 14:29:04.948392 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceilometer-ipmi-config-data-0\") pod \"d8589776-bb1f-42ea-8bfa-7053520c66b7\" (UID: \"d8589776-bb1f-42ea-8bfa-7053520c66b7\") " Nov 24 14:29:04 crc kubenswrapper[5039]: I1124 14:29:04.958017 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceph" (OuterVolumeSpecName: "ceph") pod "d8589776-bb1f-42ea-8bfa-7053520c66b7" (UID: "d8589776-bb1f-42ea-8bfa-7053520c66b7"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:29:04 crc kubenswrapper[5039]: I1124 14:29:04.958297 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8589776-bb1f-42ea-8bfa-7053520c66b7-kube-api-access-jcgkv" (OuterVolumeSpecName: "kube-api-access-jcgkv") pod "d8589776-bb1f-42ea-8bfa-7053520c66b7" (UID: "d8589776-bb1f-42ea-8bfa-7053520c66b7"). InnerVolumeSpecName "kube-api-access-jcgkv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:29:04 crc kubenswrapper[5039]: I1124 14:29:04.964228 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "d8589776-bb1f-42ea-8bfa-7053520c66b7" (UID: "d8589776-bb1f-42ea-8bfa-7053520c66b7"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:29:04 crc kubenswrapper[5039]: I1124 14:29:04.988104 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceilometer-ipmi-config-data-2" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-2") pod "d8589776-bb1f-42ea-8bfa-7053520c66b7" (UID: "d8589776-bb1f-42ea-8bfa-7053520c66b7"). InnerVolumeSpecName "ceilometer-ipmi-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:29:04 crc kubenswrapper[5039]: I1124 14:29:04.989800 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-inventory" (OuterVolumeSpecName: "inventory") pod "d8589776-bb1f-42ea-8bfa-7053520c66b7" (UID: "d8589776-bb1f-42ea-8bfa-7053520c66b7"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:29:04 crc kubenswrapper[5039]: I1124 14:29:04.994463 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d8589776-bb1f-42ea-8bfa-7053520c66b7" (UID: "d8589776-bb1f-42ea-8bfa-7053520c66b7"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:29:04 crc kubenswrapper[5039]: I1124 14:29:04.995726 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceilometer-ipmi-config-data-0" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-0") pod "d8589776-bb1f-42ea-8bfa-7053520c66b7" (UID: "d8589776-bb1f-42ea-8bfa-7053520c66b7"). InnerVolumeSpecName "ceilometer-ipmi-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.007930 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceilometer-ipmi-config-data-1" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-1") pod "d8589776-bb1f-42ea-8bfa-7053520c66b7" (UID: "d8589776-bb1f-42ea-8bfa-7053520c66b7"). InnerVolumeSpecName "ceilometer-ipmi-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.050528 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jcgkv\" (UniqueName: \"kubernetes.io/projected/d8589776-bb1f-42ea-8bfa-7053520c66b7-kube-api-access-jcgkv\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.050580 5039 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceilometer-ipmi-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.050590 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.050599 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.050610 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceph\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.050618 5039 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceilometer-ipmi-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.050627 5039 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-ceilometer-ipmi-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.050638 5039 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8589776-bb1f-42ea-8bfa-7053520c66b7-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.270169 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" 
event={"ID":"d8589776-bb1f-42ea-8bfa-7053520c66b7","Type":"ContainerDied","Data":"5599be1d38ef10281ae11da5a311212aa7346a9a13156088474f128f39b8befb"} Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.270214 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5599be1d38ef10281ae11da5a311212aa7346a9a13156088474f128f39b8befb" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.270284 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.371821 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt"] Nov 24 14:29:05 crc kubenswrapper[5039]: E1124 14:29:05.372293 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eda0ee9b-8020-45b8-ac42-d063362d4bf1" containerName="extract-content" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.372310 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="eda0ee9b-8020-45b8-ac42-d063362d4bf1" containerName="extract-content" Nov 24 14:29:05 crc kubenswrapper[5039]: E1124 14:29:05.372330 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eda0ee9b-8020-45b8-ac42-d063362d4bf1" containerName="extract-utilities" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.372339 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="eda0ee9b-8020-45b8-ac42-d063362d4bf1" containerName="extract-utilities" Nov 24 14:29:05 crc kubenswrapper[5039]: E1124 14:29:05.372362 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8589776-bb1f-42ea-8bfa-7053520c66b7" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.372372 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8589776-bb1f-42ea-8bfa-7053520c66b7" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Nov 24 14:29:05 crc kubenswrapper[5039]: E1124 14:29:05.372406 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eda0ee9b-8020-45b8-ac42-d063362d4bf1" containerName="registry-server" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.372414 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="eda0ee9b-8020-45b8-ac42-d063362d4bf1" containerName="registry-server" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.372715 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8589776-bb1f-42ea-8bfa-7053520c66b7" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.372749 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="eda0ee9b-8020-45b8-ac42-d063362d4bf1" containerName="registry-server" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.373562 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.375356 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.375603 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"logging-compute-config-data" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.376095 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.376439 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q6zx5" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.376484 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.378485 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.390077 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt"] Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.566295 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-ssh-key\") pod \"logging-edpm-deployment-openstack-edpm-ipam-tpltt\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.566384 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvfll\" (UniqueName: \"kubernetes.io/projected/bed151c2-ef33-4571-b779-761a70733f9d-kube-api-access-hvfll\") pod \"logging-edpm-deployment-openstack-edpm-ipam-tpltt\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.566676 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-tpltt\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.567171 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-tpltt\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.567375 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-tpltt\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " 
pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.567421 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-ceph\") pod \"logging-edpm-deployment-openstack-edpm-ipam-tpltt\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.669233 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-tpltt\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.669347 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-tpltt\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.669374 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-ceph\") pod \"logging-edpm-deployment-openstack-edpm-ipam-tpltt\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.669443 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-ssh-key\") pod \"logging-edpm-deployment-openstack-edpm-ipam-tpltt\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.669474 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvfll\" (UniqueName: \"kubernetes.io/projected/bed151c2-ef33-4571-b779-761a70733f9d-kube-api-access-hvfll\") pod \"logging-edpm-deployment-openstack-edpm-ipam-tpltt\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.669544 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-tpltt\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.673090 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-tpltt\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 
14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.673578 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-ssh-key\") pod \"logging-edpm-deployment-openstack-edpm-ipam-tpltt\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.674347 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-ceph\") pod \"logging-edpm-deployment-openstack-edpm-ipam-tpltt\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.674680 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-tpltt\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.675022 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-tpltt\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.686025 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvfll\" (UniqueName: \"kubernetes.io/projected/bed151c2-ef33-4571-b779-761a70733f9d-kube-api-access-hvfll\") pod \"logging-edpm-deployment-openstack-edpm-ipam-tpltt\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:05 crc kubenswrapper[5039]: I1124 14:29:05.695796 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:06 crc kubenswrapper[5039]: I1124 14:29:06.250856 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt"] Nov 24 14:29:06 crc kubenswrapper[5039]: W1124 14:29:06.258429 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbed151c2_ef33_4571_b779_761a70733f9d.slice/crio-65d8b7573a6679e65e031f42441186b5ce66175f57536ab725cc72967522e14e WatchSource:0}: Error finding container 65d8b7573a6679e65e031f42441186b5ce66175f57536ab725cc72967522e14e: Status 404 returned error can't find the container with id 65d8b7573a6679e65e031f42441186b5ce66175f57536ab725cc72967522e14e Nov 24 14:29:06 crc kubenswrapper[5039]: I1124 14:29:06.281198 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" event={"ID":"bed151c2-ef33-4571-b779-761a70733f9d","Type":"ContainerStarted","Data":"65d8b7573a6679e65e031f42441186b5ce66175f57536ab725cc72967522e14e"} Nov 24 14:29:07 crc kubenswrapper[5039]: I1124 14:29:07.299156 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" event={"ID":"bed151c2-ef33-4571-b779-761a70733f9d","Type":"ContainerStarted","Data":"9b6e112af92559672d94c63274a7fa3772eefd79a6735401be82e146ba326574"} Nov 24 14:29:07 crc kubenswrapper[5039]: I1124 14:29:07.322568 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" podStartSLOduration=1.868407 podStartE2EDuration="2.322544872s" podCreationTimestamp="2025-11-24 14:29:05 +0000 UTC" firstStartedPulling="2025-11-24 14:29:06.261146446 +0000 UTC m=+4258.700270956" lastFinishedPulling="2025-11-24 14:29:06.715284288 +0000 UTC m=+4259.154408828" observedRunningTime="2025-11-24 14:29:07.317573901 +0000 UTC m=+4259.756698421" watchObservedRunningTime="2025-11-24 14:29:07.322544872 +0000 UTC m=+4259.761669382" Nov 24 14:29:20 crc kubenswrapper[5039]: I1124 14:29:20.101288 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:29:20 crc kubenswrapper[5039]: I1124 14:29:20.102703 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 14:29:20 crc kubenswrapper[5039]: I1124 14:29:20.102768 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 14:29:20 crc kubenswrapper[5039]: I1124 14:29:20.103696 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 14:29:20 crc 
kubenswrapper[5039]: I1124 14:29:20.103757 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" gracePeriod=600 Nov 24 14:29:20 crc kubenswrapper[5039]: E1124 14:29:20.233132 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:29:20 crc kubenswrapper[5039]: I1124 14:29:20.425320 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" exitCode=0 Nov 24 14:29:20 crc kubenswrapper[5039]: I1124 14:29:20.425396 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40"} Nov 24 14:29:20 crc kubenswrapper[5039]: I1124 14:29:20.425470 5039 scope.go:117] "RemoveContainer" containerID="a44b192acf46995fecc0c4d4f9cc70c29cd209cc1b45f88a225e15065c976530" Nov 24 14:29:20 crc kubenswrapper[5039]: I1124 14:29:20.426694 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:29:20 crc kubenswrapper[5039]: E1124 14:29:20.427050 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:29:20 crc kubenswrapper[5039]: I1124 14:29:20.428021 5039 generic.go:334] "Generic (PLEG): container finished" podID="bed151c2-ef33-4571-b779-761a70733f9d" containerID="9b6e112af92559672d94c63274a7fa3772eefd79a6735401be82e146ba326574" exitCode=0 Nov 24 14:29:20 crc kubenswrapper[5039]: I1124 14:29:20.428070 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" event={"ID":"bed151c2-ef33-4571-b779-761a70733f9d","Type":"ContainerDied","Data":"9b6e112af92559672d94c63274a7fa3772eefd79a6735401be82e146ba326574"} Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.104969 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.246835 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-ceph\") pod \"bed151c2-ef33-4571-b779-761a70733f9d\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.247380 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-ssh-key\") pod \"bed151c2-ef33-4571-b779-761a70733f9d\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.247600 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-logging-compute-config-data-0\") pod \"bed151c2-ef33-4571-b779-761a70733f9d\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.247674 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-inventory\") pod \"bed151c2-ef33-4571-b779-761a70733f9d\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.247746 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hvfll\" (UniqueName: \"kubernetes.io/projected/bed151c2-ef33-4571-b779-761a70733f9d-kube-api-access-hvfll\") pod \"bed151c2-ef33-4571-b779-761a70733f9d\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.247798 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-logging-compute-config-data-1\") pod \"bed151c2-ef33-4571-b779-761a70733f9d\" (UID: \"bed151c2-ef33-4571-b779-761a70733f9d\") " Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.253680 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-ceph" (OuterVolumeSpecName: "ceph") pod "bed151c2-ef33-4571-b779-761a70733f9d" (UID: "bed151c2-ef33-4571-b779-761a70733f9d"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.254328 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bed151c2-ef33-4571-b779-761a70733f9d-kube-api-access-hvfll" (OuterVolumeSpecName: "kube-api-access-hvfll") pod "bed151c2-ef33-4571-b779-761a70733f9d" (UID: "bed151c2-ef33-4571-b779-761a70733f9d"). InnerVolumeSpecName "kube-api-access-hvfll". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.297817 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-logging-compute-config-data-0" (OuterVolumeSpecName: "logging-compute-config-data-0") pod "bed151c2-ef33-4571-b779-761a70733f9d" (UID: "bed151c2-ef33-4571-b779-761a70733f9d"). InnerVolumeSpecName "logging-compute-config-data-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.302061 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-logging-compute-config-data-1" (OuterVolumeSpecName: "logging-compute-config-data-1") pod "bed151c2-ef33-4571-b779-761a70733f9d" (UID: "bed151c2-ef33-4571-b779-761a70733f9d"). InnerVolumeSpecName "logging-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.304607 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "bed151c2-ef33-4571-b779-761a70733f9d" (UID: "bed151c2-ef33-4571-b779-761a70733f9d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.304880 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-inventory" (OuterVolumeSpecName: "inventory") pod "bed151c2-ef33-4571-b779-761a70733f9d" (UID: "bed151c2-ef33-4571-b779-761a70733f9d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.352066 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hvfll\" (UniqueName: \"kubernetes.io/projected/bed151c2-ef33-4571-b779-761a70733f9d-kube-api-access-hvfll\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.352107 5039 reconciler_common.go:293] "Volume detached for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-logging-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.352122 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-ceph\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.352134 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.352151 5039 reconciler_common.go:293] "Volume detached for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-logging-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.352162 5039 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bed151c2-ef33-4571-b779-761a70733f9d-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.459277 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" event={"ID":"bed151c2-ef33-4571-b779-761a70733f9d","Type":"ContainerDied","Data":"65d8b7573a6679e65e031f42441186b5ce66175f57536ab725cc72967522e14e"} Nov 24 14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.459323 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="65d8b7573a6679e65e031f42441186b5ce66175f57536ab725cc72967522e14e" Nov 24 
14:29:22 crc kubenswrapper[5039]: I1124 14:29:22.459340 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-tpltt" Nov 24 14:29:31 crc kubenswrapper[5039]: I1124 14:29:31.307960 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:29:31 crc kubenswrapper[5039]: E1124 14:29:31.310087 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.268041 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"] Nov 24 14:29:39 crc kubenswrapper[5039]: E1124 14:29:39.269136 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bed151c2-ef33-4571-b779-761a70733f9d" containerName="logging-edpm-deployment-openstack-edpm-ipam" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.269151 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="bed151c2-ef33-4571-b779-761a70733f9d" containerName="logging-edpm-deployment-openstack-edpm-ipam" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.269373 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="bed151c2-ef33-4571-b779-761a70733f9d" containerName="logging-edpm-deployment-openstack-edpm-ipam" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.270717 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.275996 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.283493 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.292032 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.295184 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.307303 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-volume1-config-data" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.309476 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.329712 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.351589 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.351632 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.351651 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.351668 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-sys\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.351698 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zq472\" (UniqueName: \"kubernetes.io/projected/0ed5042d-f435-4adf-aa2b-6c1949957f4c-kube-api-access-zq472\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.351723 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.351746 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-lib-modules\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.351786 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: 
\"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.351847 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-run\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.351864 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ed5042d-f435-4adf-aa2b-6c1949957f4c-scripts\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.351894 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.351910 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-sys\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.351926 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lt6k\" (UniqueName: \"kubernetes.io/projected/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-kube-api-access-2lt6k\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.351955 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-dev\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.351971 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-dev\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.351989 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.352008 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 
crc kubenswrapper[5039]: I1124 14:29:39.352022 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0ed5042d-f435-4adf-aa2b-6c1949957f4c-config-data-custom\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.352068 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ed5042d-f435-4adf-aa2b-6c1949957f4c-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.352095 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/0ed5042d-f435-4adf-aa2b-6c1949957f4c-ceph\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.352113 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.352129 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.352144 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.352162 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.352177 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.352198 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-etc-nvme\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.352245 5039 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.352262 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-run\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.352281 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.352317 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.352341 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ed5042d-f435-4adf-aa2b-6c1949957f4c-config-data\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.352361 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.453710 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.453755 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ed5042d-f435-4adf-aa2b-6c1949957f4c-config-data\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.453774 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.453795 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-etc-iscsi\") pod 
\"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.453830 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.453845 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.453862 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-sys\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.453882 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zq472\" (UniqueName: \"kubernetes.io/projected/0ed5042d-f435-4adf-aa2b-6c1949957f4c-kube-api-access-zq472\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.453899 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.453931 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-lib-modules\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.453949 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.453980 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-run\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.453995 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ed5042d-f435-4adf-aa2b-6c1949957f4c-scripts\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.454034 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lt6k\" 
(UniqueName: \"kubernetes.io/projected/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-kube-api-access-2lt6k\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.454048 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.454062 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-sys\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.454094 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-dev\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.454113 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-dev\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.454130 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.454151 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.454168 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0ed5042d-f435-4adf-aa2b-6c1949957f4c-config-data-custom\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.454200 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ed5042d-f435-4adf-aa2b-6c1949957f4c-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.454216 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/0ed5042d-f435-4adf-aa2b-6c1949957f4c-ceph\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.454237 5039 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.454251 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.454268 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.454288 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.454303 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.454331 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-etc-nvme\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.454370 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.454387 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-run\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.454414 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.454680 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " 
pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.455525 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-sys\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.456759 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.457198 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.457263 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.457403 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-etc-nvme\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.457457 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-run\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.457481 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.457559 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.457584 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.457607 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-dev\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " 
pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.457879 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.457884 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.457913 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-sys\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.458153 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-run\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.458951 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-lib-modules\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.458980 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.459015 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.459036 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-dev\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.459936 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/0ed5042d-f435-4adf-aa2b-6c1949957f4c-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.461622 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " 
pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.461868 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.463047 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ed5042d-f435-4adf-aa2b-6c1949957f4c-scripts\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.463133 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.464409 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.466099 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ed5042d-f435-4adf-aa2b-6c1949957f4c-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.466463 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.470238 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0ed5042d-f435-4adf-aa2b-6c1949957f4c-config-data-custom\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.472537 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/0ed5042d-f435-4adf-aa2b-6c1949957f4c-ceph\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.474715 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zq472\" (UniqueName: \"kubernetes.io/projected/0ed5042d-f435-4adf-aa2b-6c1949957f4c-kube-api-access-zq472\") pod \"cinder-backup-0\" (UID: \"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.478395 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ed5042d-f435-4adf-aa2b-6c1949957f4c-config-data\") pod \"cinder-backup-0\" (UID: 
\"0ed5042d-f435-4adf-aa2b-6c1949957f4c\") " pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.478808 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2lt6k\" (UniqueName: \"kubernetes.io/projected/d82cb8c7-3a11-43f9-94a7-63e8a4b824d4-kube-api-access-2lt6k\") pod \"cinder-volume-volume1-0\" (UID: \"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4\") " pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.589380 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.612816 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.964896 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-create-9lfjc"] Nov 24 14:29:39 crc kubenswrapper[5039]: I1124 14:29:39.966946 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-9lfjc" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.003015 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-9lfjc"] Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.064187 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-7fe4-account-create-sp5d2"] Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.073894 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-7fe4-account-create-sp5d2" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.077294 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-db-secret" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.079176 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wk5q6\" (UniqueName: \"kubernetes.io/projected/943f0212-e353-41e8-9c0c-e1f1dc5d2649-kube-api-access-wk5q6\") pod \"manila-db-create-9lfjc\" (UID: \"943f0212-e353-41e8-9c0c-e1f1dc5d2649\") " pod="openstack/manila-db-create-9lfjc" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.079260 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/943f0212-e353-41e8-9c0c-e1f1dc5d2649-operator-scripts\") pod \"manila-db-create-9lfjc\" (UID: \"943f0212-e353-41e8-9c0c-e1f1dc5d2649\") " pod="openstack/manila-db-create-9lfjc" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.080361 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-7fe4-account-create-sp5d2"] Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.136023 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-fd8bc7b7f-2mcnl"] Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.138629 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-fd8bc7b7f-2mcnl" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.143736 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.143978 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-t9tf8" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.148302 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.148996 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.181724 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxn2h\" (UniqueName: \"kubernetes.io/projected/2b67cb20-74e6-4bfe-b117-b86937dbd140-kube-api-access-kxn2h\") pod \"manila-7fe4-account-create-sp5d2\" (UID: \"2b67cb20-74e6-4bfe-b117-b86937dbd140\") " pod="openstack/manila-7fe4-account-create-sp5d2" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.181808 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wk5q6\" (UniqueName: \"kubernetes.io/projected/943f0212-e353-41e8-9c0c-e1f1dc5d2649-kube-api-access-wk5q6\") pod \"manila-db-create-9lfjc\" (UID: \"943f0212-e353-41e8-9c0c-e1f1dc5d2649\") " pod="openstack/manila-db-create-9lfjc" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.181846 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/943f0212-e353-41e8-9c0c-e1f1dc5d2649-operator-scripts\") pod \"manila-db-create-9lfjc\" (UID: \"943f0212-e353-41e8-9c0c-e1f1dc5d2649\") " pod="openstack/manila-db-create-9lfjc" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.181868 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b67cb20-74e6-4bfe-b117-b86937dbd140-operator-scripts\") pod \"manila-7fe4-account-create-sp5d2\" (UID: \"2b67cb20-74e6-4bfe-b117-b86937dbd140\") " pod="openstack/manila-7fe4-account-create-sp5d2" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.182957 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/943f0212-e353-41e8-9c0c-e1f1dc5d2649-operator-scripts\") pod \"manila-db-create-9lfjc\" (UID: \"943f0212-e353-41e8-9c0c-e1f1dc5d2649\") " pod="openstack/manila-db-create-9lfjc" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.234042 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wk5q6\" (UniqueName: \"kubernetes.io/projected/943f0212-e353-41e8-9c0c-e1f1dc5d2649-kube-api-access-wk5q6\") pod \"manila-db-create-9lfjc\" (UID: \"943f0212-e353-41e8-9c0c-e1f1dc5d2649\") " pod="openstack/manila-db-create-9lfjc" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.235919 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-fd8bc7b7f-2mcnl"] Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.290149 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3943960b-1e53-413a-9dd9-505fe98db72d-config-data\") pod 
\"horizon-fd8bc7b7f-2mcnl\" (UID: \"3943960b-1e53-413a-9dd9-505fe98db72d\") " pod="openstack/horizon-fd8bc7b7f-2mcnl" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.294985 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3943960b-1e53-413a-9dd9-505fe98db72d-horizon-secret-key\") pod \"horizon-fd8bc7b7f-2mcnl\" (UID: \"3943960b-1e53-413a-9dd9-505fe98db72d\") " pod="openstack/horizon-fd8bc7b7f-2mcnl" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.295231 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxn2h\" (UniqueName: \"kubernetes.io/projected/2b67cb20-74e6-4bfe-b117-b86937dbd140-kube-api-access-kxn2h\") pod \"manila-7fe4-account-create-sp5d2\" (UID: \"2b67cb20-74e6-4bfe-b117-b86937dbd140\") " pod="openstack/manila-7fe4-account-create-sp5d2" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.295433 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3943960b-1e53-413a-9dd9-505fe98db72d-scripts\") pod \"horizon-fd8bc7b7f-2mcnl\" (UID: \"3943960b-1e53-413a-9dd9-505fe98db72d\") " pod="openstack/horizon-fd8bc7b7f-2mcnl" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.295565 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b67cb20-74e6-4bfe-b117-b86937dbd140-operator-scripts\") pod \"manila-7fe4-account-create-sp5d2\" (UID: \"2b67cb20-74e6-4bfe-b117-b86937dbd140\") " pod="openstack/manila-7fe4-account-create-sp5d2" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.295655 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gb4gj\" (UniqueName: \"kubernetes.io/projected/3943960b-1e53-413a-9dd9-505fe98db72d-kube-api-access-gb4gj\") pod \"horizon-fd8bc7b7f-2mcnl\" (UID: \"3943960b-1e53-413a-9dd9-505fe98db72d\") " pod="openstack/horizon-fd8bc7b7f-2mcnl" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.295810 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3943960b-1e53-413a-9dd9-505fe98db72d-logs\") pod \"horizon-fd8bc7b7f-2mcnl\" (UID: \"3943960b-1e53-413a-9dd9-505fe98db72d\") " pod="openstack/horizon-fd8bc7b7f-2mcnl" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.302517 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b67cb20-74e6-4bfe-b117-b86937dbd140-operator-scripts\") pod \"manila-7fe4-account-create-sp5d2\" (UID: \"2b67cb20-74e6-4bfe-b117-b86937dbd140\") " pod="openstack/manila-7fe4-account-create-sp5d2" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.302582 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-create-9lfjc" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.338362 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxn2h\" (UniqueName: \"kubernetes.io/projected/2b67cb20-74e6-4bfe-b117-b86937dbd140-kube-api-access-kxn2h\") pod \"manila-7fe4-account-create-sp5d2\" (UID: \"2b67cb20-74e6-4bfe-b117-b86937dbd140\") " pod="openstack/manila-7fe4-account-create-sp5d2" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.383934 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-76c7f554dc-ctpbw"] Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.385838 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-76c7f554dc-ctpbw" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.398093 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3943960b-1e53-413a-9dd9-505fe98db72d-logs\") pod \"horizon-fd8bc7b7f-2mcnl\" (UID: \"3943960b-1e53-413a-9dd9-505fe98db72d\") " pod="openstack/horizon-fd8bc7b7f-2mcnl" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.399745 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.400305 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3943960b-1e53-413a-9dd9-505fe98db72d-logs\") pod \"horizon-fd8bc7b7f-2mcnl\" (UID: \"3943960b-1e53-413a-9dd9-505fe98db72d\") " pod="openstack/horizon-fd8bc7b7f-2mcnl" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.401154 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3943960b-1e53-413a-9dd9-505fe98db72d-config-data\") pod \"horizon-fd8bc7b7f-2mcnl\" (UID: \"3943960b-1e53-413a-9dd9-505fe98db72d\") " pod="openstack/horizon-fd8bc7b7f-2mcnl" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.401210 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfkdf\" (UniqueName: \"kubernetes.io/projected/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-kube-api-access-xfkdf\") pod \"horizon-76c7f554dc-ctpbw\" (UID: \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\") " pod="openstack/horizon-76c7f554dc-ctpbw" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.401231 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3943960b-1e53-413a-9dd9-505fe98db72d-horizon-secret-key\") pod \"horizon-fd8bc7b7f-2mcnl\" (UID: \"3943960b-1e53-413a-9dd9-505fe98db72d\") " pod="openstack/horizon-fd8bc7b7f-2mcnl" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.401305 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-scripts\") pod \"horizon-76c7f554dc-ctpbw\" (UID: \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\") " pod="openstack/horizon-76c7f554dc-ctpbw" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.401342 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-logs\") pod \"horizon-76c7f554dc-ctpbw\" (UID: 
\"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\") " pod="openstack/horizon-76c7f554dc-ctpbw" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.401464 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3943960b-1e53-413a-9dd9-505fe98db72d-scripts\") pod \"horizon-fd8bc7b7f-2mcnl\" (UID: \"3943960b-1e53-413a-9dd9-505fe98db72d\") " pod="openstack/horizon-fd8bc7b7f-2mcnl" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.401496 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-horizon-secret-key\") pod \"horizon-76c7f554dc-ctpbw\" (UID: \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\") " pod="openstack/horizon-76c7f554dc-ctpbw" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.403226 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gb4gj\" (UniqueName: \"kubernetes.io/projected/3943960b-1e53-413a-9dd9-505fe98db72d-kube-api-access-gb4gj\") pod \"horizon-fd8bc7b7f-2mcnl\" (UID: \"3943960b-1e53-413a-9dd9-505fe98db72d\") " pod="openstack/horizon-fd8bc7b7f-2mcnl" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.403287 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-config-data\") pod \"horizon-76c7f554dc-ctpbw\" (UID: \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\") " pod="openstack/horizon-76c7f554dc-ctpbw" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.403983 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3943960b-1e53-413a-9dd9-505fe98db72d-scripts\") pod \"horizon-fd8bc7b7f-2mcnl\" (UID: \"3943960b-1e53-413a-9dd9-505fe98db72d\") " pod="openstack/horizon-fd8bc7b7f-2mcnl" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.405266 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3943960b-1e53-413a-9dd9-505fe98db72d-config-data\") pod \"horizon-fd8bc7b7f-2mcnl\" (UID: \"3943960b-1e53-413a-9dd9-505fe98db72d\") " pod="openstack/horizon-fd8bc7b7f-2mcnl" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.415885 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.419814 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3943960b-1e53-413a-9dd9-505fe98db72d-horizon-secret-key\") pod \"horizon-fd8bc7b7f-2mcnl\" (UID: \"3943960b-1e53-413a-9dd9-505fe98db72d\") " pod="openstack/horizon-fd8bc7b7f-2mcnl" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.426259 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-2gxh2" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.426475 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.426612 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.426796 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.427390 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-76c7f554dc-ctpbw"] Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.430471 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-7fe4-account-create-sp5d2" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.431808 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gb4gj\" (UniqueName: \"kubernetes.io/projected/3943960b-1e53-413a-9dd9-505fe98db72d-kube-api-access-gb4gj\") pod \"horizon-fd8bc7b7f-2mcnl\" (UID: \"3943960b-1e53-413a-9dd9-505fe98db72d\") " pod="openstack/horizon-fd8bc7b7f-2mcnl" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.446900 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.467482 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.475475 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.479828 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.480542 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.480736 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.490748 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.508608 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-horizon-secret-key\") pod \"horizon-76c7f554dc-ctpbw\" (UID: \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\") " pod="openstack/horizon-76c7f554dc-ctpbw" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.508664 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-config-data\") pod \"horizon-76c7f554dc-ctpbw\" (UID: \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\") " pod="openstack/horizon-76c7f554dc-ctpbw" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.508741 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfkdf\" (UniqueName: \"kubernetes.io/projected/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-kube-api-access-xfkdf\") pod \"horizon-76c7f554dc-ctpbw\" (UID: \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\") " pod="openstack/horizon-76c7f554dc-ctpbw" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.508780 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-scripts\") pod \"horizon-76c7f554dc-ctpbw\" (UID: \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\") " pod="openstack/horizon-76c7f554dc-ctpbw" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.508802 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-logs\") pod \"horizon-76c7f554dc-ctpbw\" (UID: \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\") " pod="openstack/horizon-76c7f554dc-ctpbw" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.509190 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-logs\") pod \"horizon-76c7f554dc-ctpbw\" (UID: \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\") " pod="openstack/horizon-76c7f554dc-ctpbw" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.513712 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-scripts\") pod \"horizon-76c7f554dc-ctpbw\" (UID: \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\") " pod="openstack/horizon-76c7f554dc-ctpbw" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.513997 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: 
\"kubernetes.io/secret/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-horizon-secret-key\") pod \"horizon-76c7f554dc-ctpbw\" (UID: \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\") " pod="openstack/horizon-76c7f554dc-ctpbw" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.516826 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-fd8bc7b7f-2mcnl" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.532358 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.533732 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-config-data\") pod \"horizon-76c7f554dc-ctpbw\" (UID: \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\") " pod="openstack/horizon-76c7f554dc-ctpbw" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.567157 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xfkdf\" (UniqueName: \"kubernetes.io/projected/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-kube-api-access-xfkdf\") pod \"horizon-76c7f554dc-ctpbw\" (UID: \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\") " pod="openstack/horizon-76c7f554dc-ctpbw" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.590659 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.610813 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.610877 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-scripts\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.610909 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f1f3d379-c67d-4e38-8796-f784377550de-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.610938 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vjskf\" (UniqueName: \"kubernetes.io/projected/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-kube-api-access-vjskf\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.610995 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.611011 5039 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/f1f3d379-c67d-4e38-8796-f784377550de-ceph\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.611039 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.611059 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.611082 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-logs\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.611107 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djtmh\" (UniqueName: \"kubernetes.io/projected/f1f3d379-c67d-4e38-8796-f784377550de-kube-api-access-djtmh\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.611124 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-ceph\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.611165 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-config-data\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.611208 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.611238 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc 
kubenswrapper[5039]: I1124 14:29:40.611252 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1f3d379-c67d-4e38-8796-f784377550de-logs\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.611274 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.611304 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.611328 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: W1124 14:29:40.644989 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd82cb8c7_3a11_43f9_94a7_63e8a4b824d4.slice/crio-fb647408952da9fdc7268bf910f0659c2cdb06af2ad0dabcfd63fce8e528b96e WatchSource:0}: Error finding container fb647408952da9fdc7268bf910f0659c2cdb06af2ad0dabcfd63fce8e528b96e: Status 404 returned error can't find the container with id fb647408952da9fdc7268bf910f0659c2cdb06af2ad0dabcfd63fce8e528b96e Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.686407 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"0ed5042d-f435-4adf-aa2b-6c1949957f4c","Type":"ContainerStarted","Data":"257fbf0e52840fbc1842c97a696fefdc6d2bacb1f01f0c972ce89ca210310313"} Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.707353 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4","Type":"ContainerStarted","Data":"fb647408952da9fdc7268bf910f0659c2cdb06af2ad0dabcfd63fce8e528b96e"} Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.713046 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-config-data\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.713123 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 
14:29:40.713148 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.713162 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1f3d379-c67d-4e38-8796-f784377550de-logs\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.713186 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.713217 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.713242 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.713260 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.713280 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-scripts\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.713296 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f1f3d379-c67d-4e38-8796-f784377550de-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.713323 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vjskf\" (UniqueName: \"kubernetes.io/projected/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-kube-api-access-vjskf\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.713356 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.713371 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/f1f3d379-c67d-4e38-8796-f784377550de-ceph\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.713399 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.713418 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.713442 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-logs\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.713468 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djtmh\" (UniqueName: \"kubernetes.io/projected/f1f3d379-c67d-4e38-8796-f784377550de-kube-api-access-djtmh\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.713486 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-ceph\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.718799 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1f3d379-c67d-4e38-8796-f784377550de-logs\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.719791 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-ceph\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.721336 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: 
\"f1f3d379-c67d-4e38-8796-f784377550de\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.723061 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.723194 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.723610 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-logs\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.728362 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/f1f3d379-c67d-4e38-8796-f784377550de-ceph\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.731771 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f1f3d379-c67d-4e38-8796-f784377550de-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.732307 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-scripts\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.733868 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.734127 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.737780 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc 
kubenswrapper[5039]: I1124 14:29:40.740173 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.740655 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-config-data\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.743400 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-76c7f554dc-ctpbw" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.743548 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djtmh\" (UniqueName: \"kubernetes.io/projected/f1f3d379-c67d-4e38-8796-f784377550de-kube-api-access-djtmh\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.752594 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vjskf\" (UniqueName: \"kubernetes.io/projected/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-kube-api-access-vjskf\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.753263 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.755232 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.764320 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.769422 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:40 crc kubenswrapper[5039]: I1124 14:29:40.789996 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 14:29:41 crc kubenswrapper[5039]: I1124 14:29:41.061329 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 14:29:41 crc kubenswrapper[5039]: W1124 14:29:41.064447 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod943f0212_e353_41e8_9c0c_e1f1dc5d2649.slice/crio-d9804ad607f484e2a8f13516cada07daf938e7f196a5b6a8e453401ec8745795 WatchSource:0}: Error finding container d9804ad607f484e2a8f13516cada07daf938e7f196a5b6a8e453401ec8745795: Status 404 returned error can't find the container with id d9804ad607f484e2a8f13516cada07daf938e7f196a5b6a8e453401ec8745795 Nov 24 14:29:41 crc kubenswrapper[5039]: I1124 14:29:41.075538 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-9lfjc"] Nov 24 14:29:41 crc kubenswrapper[5039]: I1124 14:29:41.218971 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-7fe4-account-create-sp5d2"] Nov 24 14:29:41 crc kubenswrapper[5039]: W1124 14:29:41.273886 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b67cb20_74e6_4bfe_b117_b86937dbd140.slice/crio-e461adb98a4b9c4ed806dc437b3c185956e87a13f62b27c9d7c0ce3cb9b61412 WatchSource:0}: Error finding container e461adb98a4b9c4ed806dc437b3c185956e87a13f62b27c9d7c0ce3cb9b61412: Status 404 returned error can't find the container with id e461adb98a4b9c4ed806dc437b3c185956e87a13f62b27c9d7c0ce3cb9b61412 Nov 24 14:29:41 crc kubenswrapper[5039]: I1124 14:29:41.359971 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-fd8bc7b7f-2mcnl"] Nov 24 14:29:41 crc kubenswrapper[5039]: I1124 14:29:41.606660 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-76c7f554dc-ctpbw"] Nov 24 14:29:41 crc kubenswrapper[5039]: I1124 14:29:41.723192 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 14:29:41 crc kubenswrapper[5039]: I1124 14:29:41.727724 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-7fe4-account-create-sp5d2" event={"ID":"2b67cb20-74e6-4bfe-b117-b86937dbd140","Type":"ContainerStarted","Data":"e461adb98a4b9c4ed806dc437b3c185956e87a13f62b27c9d7c0ce3cb9b61412"} Nov 24 14:29:41 crc kubenswrapper[5039]: W1124 14:29:41.738344 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6a93d012_5b3e_4d77_82bb_c38e45fc9dde.slice/crio-77fa330dfa3fa8ba0c61f606f3dd5f614d38df0e38fb57f54a577749eb088a32 WatchSource:0}: Error finding container 77fa330dfa3fa8ba0c61f606f3dd5f614d38df0e38fb57f54a577749eb088a32: Status 404 returned error can't find the container with id 77fa330dfa3fa8ba0c61f606f3dd5f614d38df0e38fb57f54a577749eb088a32 Nov 24 14:29:41 crc kubenswrapper[5039]: I1124 14:29:41.743170 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-9lfjc" event={"ID":"943f0212-e353-41e8-9c0c-e1f1dc5d2649","Type":"ContainerStarted","Data":"ad160731468b1f2b18827d3ef5783f8094328d2d0e0690b80cd407cbf7365968"} Nov 24 14:29:41 crc kubenswrapper[5039]: I1124 14:29:41.743206 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-9lfjc" event={"ID":"943f0212-e353-41e8-9c0c-e1f1dc5d2649","Type":"ContainerStarted","Data":"d9804ad607f484e2a8f13516cada07daf938e7f196a5b6a8e453401ec8745795"} Nov 24 14:29:41 crc kubenswrapper[5039]: I1124 14:29:41.749840 5039 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/horizon-76c7f554dc-ctpbw" event={"ID":"9f956cc7-2ab6-4818-8d36-09cbd169b9b2","Type":"ContainerStarted","Data":"414a38a61c47e811cbbbd709ef9a2454c2fb74e407be69ad74dae3f865fc0188"} Nov 24 14:29:41 crc kubenswrapper[5039]: I1124 14:29:41.779011 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-fd8bc7b7f-2mcnl" event={"ID":"3943960b-1e53-413a-9dd9-505fe98db72d","Type":"ContainerStarted","Data":"4ff08e493527c72bac4978a764094847353a57fc1601635885fbfb385a02af8c"} Nov 24 14:29:41 crc kubenswrapper[5039]: I1124 14:29:41.819411 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-create-9lfjc" podStartSLOduration=2.819383042 podStartE2EDuration="2.819383042s" podCreationTimestamp="2025-11-24 14:29:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 14:29:41.776839991 +0000 UTC m=+4294.215964491" watchObservedRunningTime="2025-11-24 14:29:41.819383042 +0000 UTC m=+4294.258507542" Nov 24 14:29:41 crc kubenswrapper[5039]: I1124 14:29:41.854223 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 14:29:42 crc kubenswrapper[5039]: I1124 14:29:42.796574 5039 generic.go:334] "Generic (PLEG): container finished" podID="2b67cb20-74e6-4bfe-b117-b86937dbd140" containerID="6ccc40f71c97f856119f4dd617bd49620c86b8b462b5d87e26fc3ab3bcb61a10" exitCode=0 Nov 24 14:29:42 crc kubenswrapper[5039]: I1124 14:29:42.796975 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-7fe4-account-create-sp5d2" event={"ID":"2b67cb20-74e6-4bfe-b117-b86937dbd140","Type":"ContainerDied","Data":"6ccc40f71c97f856119f4dd617bd49620c86b8b462b5d87e26fc3ab3bcb61a10"} Nov 24 14:29:42 crc kubenswrapper[5039]: I1124 14:29:42.802443 5039 generic.go:334] "Generic (PLEG): container finished" podID="943f0212-e353-41e8-9c0c-e1f1dc5d2649" containerID="ad160731468b1f2b18827d3ef5783f8094328d2d0e0690b80cd407cbf7365968" exitCode=0 Nov 24 14:29:42 crc kubenswrapper[5039]: I1124 14:29:42.802633 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-9lfjc" event={"ID":"943f0212-e353-41e8-9c0c-e1f1dc5d2649","Type":"ContainerDied","Data":"ad160731468b1f2b18827d3ef5783f8094328d2d0e0690b80cd407cbf7365968"} Nov 24 14:29:42 crc kubenswrapper[5039]: I1124 14:29:42.804774 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f1f3d379-c67d-4e38-8796-f784377550de","Type":"ContainerStarted","Data":"ad423712fcfced5db2534d1bb7a40cd24eafb8b67679ad7ee0419bbe2b1ea0c8"} Nov 24 14:29:42 crc kubenswrapper[5039]: I1124 14:29:42.807350 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6a93d012-5b3e-4d77-82bb-c38e45fc9dde","Type":"ContainerStarted","Data":"77fa330dfa3fa8ba0c61f606f3dd5f614d38df0e38fb57f54a577749eb088a32"} Nov 24 14:29:42 crc kubenswrapper[5039]: I1124 14:29:42.821239 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4","Type":"ContainerStarted","Data":"6f754c813848abbfc5b6132b221404718a204cc39f651ad2f09f6ab8ff0604af"} Nov 24 14:29:42 crc kubenswrapper[5039]: I1124 14:29:42.821296 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" 
event={"ID":"d82cb8c7-3a11-43f9-94a7-63e8a4b824d4","Type":"ContainerStarted","Data":"ac388fdaabf24c8e487fa0fb1e59bac6aa91ba1f0ea2888f6b50b2ef48578ed4"} Nov 24 14:29:42 crc kubenswrapper[5039]: I1124 14:29:42.879485 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-volume1-0" podStartSLOduration=2.9442290829999997 podStartE2EDuration="3.879463656s" podCreationTimestamp="2025-11-24 14:29:39 +0000 UTC" firstStartedPulling="2025-11-24 14:29:40.65146201 +0000 UTC m=+4293.090586500" lastFinishedPulling="2025-11-24 14:29:41.586696573 +0000 UTC m=+4294.025821073" observedRunningTime="2025-11-24 14:29:42.86574197 +0000 UTC m=+4295.304866470" watchObservedRunningTime="2025-11-24 14:29:42.879463656 +0000 UTC m=+4295.318588156" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.542742 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-76c7f554dc-ctpbw"] Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.629322 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6c5b658bc4-625q2"] Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.634364 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.637560 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.657055 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6c5b658bc4-625q2"] Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.676344 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.711339 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ee231063-13a0-4a14-9864-362a8459b8e7-horizon-secret-key\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.711405 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee231063-13a0-4a14-9864-362a8459b8e7-horizon-tls-certs\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.711438 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee231063-13a0-4a14-9864-362a8459b8e7-combined-ca-bundle\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.711490 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee231063-13a0-4a14-9864-362a8459b8e7-logs\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.711529 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcv96\" 
(UniqueName: \"kubernetes.io/projected/ee231063-13a0-4a14-9864-362a8459b8e7-kube-api-access-dcv96\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.711589 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ee231063-13a0-4a14-9864-362a8459b8e7-scripts\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.711690 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ee231063-13a0-4a14-9864-362a8459b8e7-config-data\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.735291 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-fd8bc7b7f-2mcnl"] Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.753128 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6fbc854bcb-ssv8l"] Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.755444 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.763375 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6fbc854bcb-ssv8l"] Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.821001 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-combined-ca-bundle\") pod \"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.821482 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-horizon-secret-key\") pod \"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.821545 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8s7g5\" (UniqueName: \"kubernetes.io/projected/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-kube-api-access-8s7g5\") pod \"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.821573 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ee231063-13a0-4a14-9864-362a8459b8e7-horizon-secret-key\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.821629 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-scripts\") pod 
\"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.821657 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee231063-13a0-4a14-9864-362a8459b8e7-horizon-tls-certs\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.821781 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee231063-13a0-4a14-9864-362a8459b8e7-combined-ca-bundle\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.821847 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee231063-13a0-4a14-9864-362a8459b8e7-logs\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.821879 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dcv96\" (UniqueName: \"kubernetes.io/projected/ee231063-13a0-4a14-9864-362a8459b8e7-kube-api-access-dcv96\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.821925 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-config-data\") pod \"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.821978 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ee231063-13a0-4a14-9864-362a8459b8e7-scripts\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.823211 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-logs\") pod \"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.823839 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ee231063-13a0-4a14-9864-362a8459b8e7-config-data\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.824210 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-horizon-tls-certs\") pod \"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " 
pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.826361 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ee231063-13a0-4a14-9864-362a8459b8e7-scripts\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.826939 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ee231063-13a0-4a14-9864-362a8459b8e7-config-data\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.827573 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee231063-13a0-4a14-9864-362a8459b8e7-logs\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.827584 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee231063-13a0-4a14-9864-362a8459b8e7-combined-ca-bundle\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.834362 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ee231063-13a0-4a14-9864-362a8459b8e7-horizon-secret-key\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.844281 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcv96\" (UniqueName: \"kubernetes.io/projected/ee231063-13a0-4a14-9864-362a8459b8e7-kube-api-access-dcv96\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.872460 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee231063-13a0-4a14-9864-362a8459b8e7-horizon-tls-certs\") pod \"horizon-6c5b658bc4-625q2\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.873289 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f1f3d379-c67d-4e38-8796-f784377550de","Type":"ContainerStarted","Data":"0e115499a4efe752aa8e738bb04ce1bce19142c2afb33be130d47974ffcf4b6b"} Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.893296 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6a93d012-5b3e-4d77-82bb-c38e45fc9dde","Type":"ContainerStarted","Data":"9c803368c88aa560f334bc2e9ea60dae5b8bfab10b5acee95fbe83dd0017b988"} Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.927794 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-horizon-secret-key\") pod 
\"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.928071 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8s7g5\" (UniqueName: \"kubernetes.io/projected/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-kube-api-access-8s7g5\") pod \"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.928178 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-scripts\") pod \"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.929153 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-config-data\") pod \"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.929351 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-logs\") pod \"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.929466 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-horizon-tls-certs\") pod \"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.929659 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-combined-ca-bundle\") pod \"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.928962 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-scripts\") pod \"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.933482 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-config-data\") pod \"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.933741 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-logs\") pod \"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.934447 5039 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-horizon-secret-key\") pod \"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.935736 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-combined-ca-bundle\") pod \"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.939243 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-horizon-tls-certs\") pod \"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.944432 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8s7g5\" (UniqueName: \"kubernetes.io/projected/4a0e58d4-73eb-4baf-8698-4c67b711e1a8-kube-api-access-8s7g5\") pod \"horizon-6fbc854bcb-ssv8l\" (UID: \"4a0e58d4-73eb-4baf-8698-4c67b711e1a8\") " pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:43 crc kubenswrapper[5039]: I1124 14:29:43.969471 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:29:44 crc kubenswrapper[5039]: I1124 14:29:44.132622 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:29:44 crc kubenswrapper[5039]: I1124 14:29:44.330542 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:29:44 crc kubenswrapper[5039]: E1124 14:29:44.331142 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:29:44 crc kubenswrapper[5039]: I1124 14:29:44.614956 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:44 crc kubenswrapper[5039]: I1124 14:29:44.918130 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6a93d012-5b3e-4d77-82bb-c38e45fc9dde","Type":"ContainerStarted","Data":"4ff928a832c382bdbca116763d305f9d83f4a06d6868603550d1f31b80b226ae"} Nov 24 14:29:44 crc kubenswrapper[5039]: I1124 14:29:44.918415 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="6a93d012-5b3e-4d77-82bb-c38e45fc9dde" containerName="glance-log" containerID="cri-o://9c803368c88aa560f334bc2e9ea60dae5b8bfab10b5acee95fbe83dd0017b988" gracePeriod=30 Nov 24 14:29:44 crc kubenswrapper[5039]: I1124 14:29:44.918560 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" 
podUID="6a93d012-5b3e-4d77-82bb-c38e45fc9dde" containerName="glance-httpd" containerID="cri-o://4ff928a832c382bdbca116763d305f9d83f4a06d6868603550d1f31b80b226ae" gracePeriod=30 Nov 24 14:29:44 crc kubenswrapper[5039]: I1124 14:29:44.944690 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.944669161 podStartE2EDuration="5.944669161s" podCreationTimestamp="2025-11-24 14:29:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 14:29:44.939493224 +0000 UTC m=+4297.378617724" watchObservedRunningTime="2025-11-24 14:29:44.944669161 +0000 UTC m=+4297.383793651" Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.560819 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-7fe4-account-create-sp5d2" Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.567066 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-9lfjc" Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.647243 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kxn2h\" (UniqueName: \"kubernetes.io/projected/2b67cb20-74e6-4bfe-b117-b86937dbd140-kube-api-access-kxn2h\") pod \"2b67cb20-74e6-4bfe-b117-b86937dbd140\" (UID: \"2b67cb20-74e6-4bfe-b117-b86937dbd140\") " Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.647365 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b67cb20-74e6-4bfe-b117-b86937dbd140-operator-scripts\") pod \"2b67cb20-74e6-4bfe-b117-b86937dbd140\" (UID: \"2b67cb20-74e6-4bfe-b117-b86937dbd140\") " Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.647665 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wk5q6\" (UniqueName: \"kubernetes.io/projected/943f0212-e353-41e8-9c0c-e1f1dc5d2649-kube-api-access-wk5q6\") pod \"943f0212-e353-41e8-9c0c-e1f1dc5d2649\" (UID: \"943f0212-e353-41e8-9c0c-e1f1dc5d2649\") " Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.647749 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/943f0212-e353-41e8-9c0c-e1f1dc5d2649-operator-scripts\") pod \"943f0212-e353-41e8-9c0c-e1f1dc5d2649\" (UID: \"943f0212-e353-41e8-9c0c-e1f1dc5d2649\") " Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.647936 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b67cb20-74e6-4bfe-b117-b86937dbd140-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2b67cb20-74e6-4bfe-b117-b86937dbd140" (UID: "2b67cb20-74e6-4bfe-b117-b86937dbd140"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.648392 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/943f0212-e353-41e8-9c0c-e1f1dc5d2649-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "943f0212-e353-41e8-9c0c-e1f1dc5d2649" (UID: "943f0212-e353-41e8-9c0c-e1f1dc5d2649"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.648518 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b67cb20-74e6-4bfe-b117-b86937dbd140-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.648541 5039 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/943f0212-e353-41e8-9c0c-e1f1dc5d2649-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.665308 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/943f0212-e353-41e8-9c0c-e1f1dc5d2649-kube-api-access-wk5q6" (OuterVolumeSpecName: "kube-api-access-wk5q6") pod "943f0212-e353-41e8-9c0c-e1f1dc5d2649" (UID: "943f0212-e353-41e8-9c0c-e1f1dc5d2649"). InnerVolumeSpecName "kube-api-access-wk5q6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.665949 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b67cb20-74e6-4bfe-b117-b86937dbd140-kube-api-access-kxn2h" (OuterVolumeSpecName: "kube-api-access-kxn2h") pod "2b67cb20-74e6-4bfe-b117-b86937dbd140" (UID: "2b67cb20-74e6-4bfe-b117-b86937dbd140"). InnerVolumeSpecName "kube-api-access-kxn2h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.765314 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kxn2h\" (UniqueName: \"kubernetes.io/projected/2b67cb20-74e6-4bfe-b117-b86937dbd140-kube-api-access-kxn2h\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.766392 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wk5q6\" (UniqueName: \"kubernetes.io/projected/943f0212-e353-41e8-9c0c-e1f1dc5d2649-kube-api-access-wk5q6\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.859491 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.949391 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"0ed5042d-f435-4adf-aa2b-6c1949957f4c","Type":"ContainerStarted","Data":"0513ec2b9e8cc07e2e081bbb3e845c4b110cb82798bbd145f462ad9f033ac239"} Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.964464 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f1f3d379-c67d-4e38-8796-f784377550de","Type":"ContainerStarted","Data":"5eccbedbb9e1904cad4b8d08630f3b76f1e76da361f1438c122667fe704b183f"} Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.964606 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="f1f3d379-c67d-4e38-8796-f784377550de" containerName="glance-log" containerID="cri-o://0e115499a4efe752aa8e738bb04ce1bce19142c2afb33be130d47974ffcf4b6b" gracePeriod=30 Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.965114 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="f1f3d379-c67d-4e38-8796-f784377550de" containerName="glance-httpd" containerID="cri-o://5eccbedbb9e1904cad4b8d08630f3b76f1e76da361f1438c122667fe704b183f" gracePeriod=30 Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.968632 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-logs\") pod \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.968678 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.968723 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-ceph\") pod \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.968784 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-scripts\") pod \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.968821 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-config-data\") pod \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.968850 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-httpd-run\") pod \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.968937 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-combined-ca-bundle\") pod \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.969063 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vjskf\" (UniqueName: \"kubernetes.io/projected/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-kube-api-access-vjskf\") pod \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.969120 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-internal-tls-certs\") pod \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\" (UID: \"6a93d012-5b3e-4d77-82bb-c38e45fc9dde\") " Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.970108 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-logs" (OuterVolumeSpecName: "logs") pod "6a93d012-5b3e-4d77-82bb-c38e45fc9dde" (UID: "6a93d012-5b3e-4d77-82bb-c38e45fc9dde"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.970610 5039 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-logs\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.971086 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "6a93d012-5b3e-4d77-82bb-c38e45fc9dde" (UID: "6a93d012-5b3e-4d77-82bb-c38e45fc9dde"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.985784 5039 generic.go:334] "Generic (PLEG): container finished" podID="6a93d012-5b3e-4d77-82bb-c38e45fc9dde" containerID="4ff928a832c382bdbca116763d305f9d83f4a06d6868603550d1f31b80b226ae" exitCode=143 Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.985914 5039 generic.go:334] "Generic (PLEG): container finished" podID="6a93d012-5b3e-4d77-82bb-c38e45fc9dde" containerID="9c803368c88aa560f334bc2e9ea60dae5b8bfab10b5acee95fbe83dd0017b988" exitCode=143 Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.986038 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6a93d012-5b3e-4d77-82bb-c38e45fc9dde","Type":"ContainerDied","Data":"4ff928a832c382bdbca116763d305f9d83f4a06d6868603550d1f31b80b226ae"} Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.986124 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6a93d012-5b3e-4d77-82bb-c38e45fc9dde","Type":"ContainerDied","Data":"9c803368c88aa560f334bc2e9ea60dae5b8bfab10b5acee95fbe83dd0017b988"} Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.987428 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6a93d012-5b3e-4d77-82bb-c38e45fc9dde","Type":"ContainerDied","Data":"77fa330dfa3fa8ba0c61f606f3dd5f614d38df0e38fb57f54a577749eb088a32"} Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.988260 5039 scope.go:117] "RemoveContainer" containerID="4ff928a832c382bdbca116763d305f9d83f4a06d6868603550d1f31b80b226ae" Nov 24 14:29:45 crc kubenswrapper[5039]: I1124 14:29:45.989737 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:45.996594 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance") pod "6a93d012-5b3e-4d77-82bb-c38e45fc9dde" (UID: "6a93d012-5b3e-4d77-82bb-c38e45fc9dde"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.005731 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-scripts" (OuterVolumeSpecName: "scripts") pod "6a93d012-5b3e-4d77-82bb-c38e45fc9dde" (UID: "6a93d012-5b3e-4d77-82bb-c38e45fc9dde"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:45.997927 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-7fe4-account-create-sp5d2" event={"ID":"2b67cb20-74e6-4bfe-b117-b86937dbd140","Type":"ContainerDied","Data":"e461adb98a4b9c4ed806dc437b3c185956e87a13f62b27c9d7c0ce3cb9b61412"} Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.005921 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e461adb98a4b9c4ed806dc437b3c185956e87a13f62b27c9d7c0ce3cb9b61412" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:45.997908 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-7fe4-account-create-sp5d2" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.008671 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-ceph" (OuterVolumeSpecName: "ceph") pod "6a93d012-5b3e-4d77-82bb-c38e45fc9dde" (UID: "6a93d012-5b3e-4d77-82bb-c38e45fc9dde"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.008706 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-kube-api-access-vjskf" (OuterVolumeSpecName: "kube-api-access-vjskf") pod "6a93d012-5b3e-4d77-82bb-c38e45fc9dde" (UID: "6a93d012-5b3e-4d77-82bb-c38e45fc9dde"). InnerVolumeSpecName "kube-api-access-vjskf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.019595 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6fbc854bcb-ssv8l"] Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.028832 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-9lfjc" event={"ID":"943f0212-e353-41e8-9c0c-e1f1dc5d2649","Type":"ContainerDied","Data":"d9804ad607f484e2a8f13516cada07daf938e7f196a5b6a8e453401ec8745795"} Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.028892 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d9804ad607f484e2a8f13516cada07daf938e7f196a5b6a8e453401ec8745795" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.028972 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-9lfjc" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.030903 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.030888424 podStartE2EDuration="6.030888424s" podCreationTimestamp="2025-11-24 14:29:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 14:29:45.985368862 +0000 UTC m=+4298.424493362" watchObservedRunningTime="2025-11-24 14:29:46.030888424 +0000 UTC m=+4298.470012924" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.072574 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vjskf\" (UniqueName: \"kubernetes.io/projected/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-kube-api-access-vjskf\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.073124 5039 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.073142 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-ceph\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.073154 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.073165 5039 reconciler_common.go:293] "Volume detached for volume 
\"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.078634 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6a93d012-5b3e-4d77-82bb-c38e45fc9dde" (UID: "6a93d012-5b3e-4d77-82bb-c38e45fc9dde"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.086750 5039 scope.go:117] "RemoveContainer" containerID="9c803368c88aa560f334bc2e9ea60dae5b8bfab10b5acee95fbe83dd0017b988" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.112835 5039 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.114238 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-config-data" (OuterVolumeSpecName: "config-data") pod "6a93d012-5b3e-4d77-82bb-c38e45fc9dde" (UID: "6a93d012-5b3e-4d77-82bb-c38e45fc9dde"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.152614 5039 scope.go:117] "RemoveContainer" containerID="4ff928a832c382bdbca116763d305f9d83f4a06d6868603550d1f31b80b226ae" Nov 24 14:29:46 crc kubenswrapper[5039]: E1124 14:29:46.153117 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ff928a832c382bdbca116763d305f9d83f4a06d6868603550d1f31b80b226ae\": container with ID starting with 4ff928a832c382bdbca116763d305f9d83f4a06d6868603550d1f31b80b226ae not found: ID does not exist" containerID="4ff928a832c382bdbca116763d305f9d83f4a06d6868603550d1f31b80b226ae" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.153158 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ff928a832c382bdbca116763d305f9d83f4a06d6868603550d1f31b80b226ae"} err="failed to get container status \"4ff928a832c382bdbca116763d305f9d83f4a06d6868603550d1f31b80b226ae\": rpc error: code = NotFound desc = could not find container \"4ff928a832c382bdbca116763d305f9d83f4a06d6868603550d1f31b80b226ae\": container with ID starting with 4ff928a832c382bdbca116763d305f9d83f4a06d6868603550d1f31b80b226ae not found: ID does not exist" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.153186 5039 scope.go:117] "RemoveContainer" containerID="9c803368c88aa560f334bc2e9ea60dae5b8bfab10b5acee95fbe83dd0017b988" Nov 24 14:29:46 crc kubenswrapper[5039]: E1124 14:29:46.153384 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c803368c88aa560f334bc2e9ea60dae5b8bfab10b5acee95fbe83dd0017b988\": container with ID starting with 9c803368c88aa560f334bc2e9ea60dae5b8bfab10b5acee95fbe83dd0017b988 not found: ID does not exist" containerID="9c803368c88aa560f334bc2e9ea60dae5b8bfab10b5acee95fbe83dd0017b988" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.153416 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c803368c88aa560f334bc2e9ea60dae5b8bfab10b5acee95fbe83dd0017b988"} err="failed to get 
container status \"9c803368c88aa560f334bc2e9ea60dae5b8bfab10b5acee95fbe83dd0017b988\": rpc error: code = NotFound desc = could not find container \"9c803368c88aa560f334bc2e9ea60dae5b8bfab10b5acee95fbe83dd0017b988\": container with ID starting with 9c803368c88aa560f334bc2e9ea60dae5b8bfab10b5acee95fbe83dd0017b988 not found: ID does not exist" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.153645 5039 scope.go:117] "RemoveContainer" containerID="4ff928a832c382bdbca116763d305f9d83f4a06d6868603550d1f31b80b226ae" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.153848 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ff928a832c382bdbca116763d305f9d83f4a06d6868603550d1f31b80b226ae"} err="failed to get container status \"4ff928a832c382bdbca116763d305f9d83f4a06d6868603550d1f31b80b226ae\": rpc error: code = NotFound desc = could not find container \"4ff928a832c382bdbca116763d305f9d83f4a06d6868603550d1f31b80b226ae\": container with ID starting with 4ff928a832c382bdbca116763d305f9d83f4a06d6868603550d1f31b80b226ae not found: ID does not exist" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.153868 5039 scope.go:117] "RemoveContainer" containerID="9c803368c88aa560f334bc2e9ea60dae5b8bfab10b5acee95fbe83dd0017b988" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.155060 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c803368c88aa560f334bc2e9ea60dae5b8bfab10b5acee95fbe83dd0017b988"} err="failed to get container status \"9c803368c88aa560f334bc2e9ea60dae5b8bfab10b5acee95fbe83dd0017b988\": rpc error: code = NotFound desc = could not find container \"9c803368c88aa560f334bc2e9ea60dae5b8bfab10b5acee95fbe83dd0017b988\": container with ID starting with 9c803368c88aa560f334bc2e9ea60dae5b8bfab10b5acee95fbe83dd0017b988 not found: ID does not exist" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.155878 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6c5b658bc4-625q2"] Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.167716 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "6a93d012-5b3e-4d77-82bb-c38e45fc9dde" (UID: "6a93d012-5b3e-4d77-82bb-c38e45fc9dde"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.175722 5039 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.175858 5039 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.175946 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.176019 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a93d012-5b3e-4d77-82bb-c38e45fc9dde-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.458839 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.484240 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.511582 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 14:29:46 crc kubenswrapper[5039]: E1124 14:29:46.512173 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b67cb20-74e6-4bfe-b117-b86937dbd140" containerName="mariadb-account-create" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.512188 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b67cb20-74e6-4bfe-b117-b86937dbd140" containerName="mariadb-account-create" Nov 24 14:29:46 crc kubenswrapper[5039]: E1124 14:29:46.512219 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a93d012-5b3e-4d77-82bb-c38e45fc9dde" containerName="glance-log" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.512226 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a93d012-5b3e-4d77-82bb-c38e45fc9dde" containerName="glance-log" Nov 24 14:29:46 crc kubenswrapper[5039]: E1124 14:29:46.512239 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="943f0212-e353-41e8-9c0c-e1f1dc5d2649" containerName="mariadb-database-create" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.512246 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="943f0212-e353-41e8-9c0c-e1f1dc5d2649" containerName="mariadb-database-create" Nov 24 14:29:46 crc kubenswrapper[5039]: E1124 14:29:46.512263 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a93d012-5b3e-4d77-82bb-c38e45fc9dde" containerName="glance-httpd" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.512271 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a93d012-5b3e-4d77-82bb-c38e45fc9dde" containerName="glance-httpd" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.512480 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="943f0212-e353-41e8-9c0c-e1f1dc5d2649" containerName="mariadb-database-create" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.512533 5039 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="2b67cb20-74e6-4bfe-b117-b86937dbd140" containerName="mariadb-account-create" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.512545 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a93d012-5b3e-4d77-82bb-c38e45fc9dde" containerName="glance-httpd" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.512555 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a93d012-5b3e-4d77-82bb-c38e45fc9dde" containerName="glance-log" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.513733 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.516035 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.516265 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.534307 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.591419 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-logs\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.591489 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.591549 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.591584 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.591637 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-ceph\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.591693 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " 
pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.591745 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.591793 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdttp\" (UniqueName: \"kubernetes.io/projected/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-kube-api-access-jdttp\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.591820 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.694719 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdttp\" (UniqueName: \"kubernetes.io/projected/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-kube-api-access-jdttp\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.694834 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.695039 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-logs\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.695127 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.695258 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.695304 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " 
pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.695338 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-ceph\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.695449 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.696542 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.698600 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.698720 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.709958 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-logs\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.710748 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.713292 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.730216 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-ceph\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.732276 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.746917 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdttp\" (UniqueName: \"kubernetes.io/projected/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-kube-api-access-jdttp\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.763457 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f1ea0e7-3b9c-4fed-85cc-901484aed56f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.794635 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"6f1ea0e7-3b9c-4fed-85cc-901484aed56f\") " pod="openstack/glance-default-internal-api-0" Nov 24 14:29:46 crc kubenswrapper[5039]: I1124 14:29:46.859840 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.041694 5039 generic.go:334] "Generic (PLEG): container finished" podID="f1f3d379-c67d-4e38-8796-f784377550de" containerID="5eccbedbb9e1904cad4b8d08630f3b76f1e76da361f1438c122667fe704b183f" exitCode=0 Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.041730 5039 generic.go:334] "Generic (PLEG): container finished" podID="f1f3d379-c67d-4e38-8796-f784377550de" containerID="0e115499a4efe752aa8e738bb04ce1bce19142c2afb33be130d47974ffcf4b6b" exitCode=143 Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.041788 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f1f3d379-c67d-4e38-8796-f784377550de","Type":"ContainerDied","Data":"5eccbedbb9e1904cad4b8d08630f3b76f1e76da361f1438c122667fe704b183f"} Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.041817 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f1f3d379-c67d-4e38-8796-f784377550de","Type":"ContainerDied","Data":"0e115499a4efe752aa8e738bb04ce1bce19142c2afb33be130d47974ffcf4b6b"} Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.041826 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f1f3d379-c67d-4e38-8796-f784377550de","Type":"ContainerDied","Data":"ad423712fcfced5db2534d1bb7a40cd24eafb8b67679ad7ee0419bbe2b1ea0c8"} Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.041836 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad423712fcfced5db2534d1bb7a40cd24eafb8b67679ad7ee0419bbe2b1ea0c8" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.045022 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c5b658bc4-625q2" 
event={"ID":"ee231063-13a0-4a14-9864-362a8459b8e7","Type":"ContainerStarted","Data":"6d627112dc2f3d89e3514506b0417ae80dd3074af6499c36c11a54c6fcffcf51"} Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.046596 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6fbc854bcb-ssv8l" event={"ID":"4a0e58d4-73eb-4baf-8698-4c67b711e1a8","Type":"ContainerStarted","Data":"75632468123c5e27d067012849f9f2c996635e832c8880a7cc3b52cc921dd968"} Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.049014 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"0ed5042d-f435-4adf-aa2b-6c1949957f4c","Type":"ContainerStarted","Data":"b660492f13e0d16e41236a516ce05161343b856ab958d89a0b75e687e13b3cd8"} Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.106797 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=3.3240775989999998 podStartE2EDuration="8.106774165s" podCreationTimestamp="2025-11-24 14:29:39 +0000 UTC" firstStartedPulling="2025-11-24 14:29:40.431242477 +0000 UTC m=+4292.870366967" lastFinishedPulling="2025-11-24 14:29:45.213939033 +0000 UTC m=+4297.653063533" observedRunningTime="2025-11-24 14:29:47.084781847 +0000 UTC m=+4299.523906347" watchObservedRunningTime="2025-11-24 14:29:47.106774165 +0000 UTC m=+4299.545898665" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.594571 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.633134 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1f3d379-c67d-4e38-8796-f784377550de-logs\") pod \"f1f3d379-c67d-4e38-8796-f784377550de\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.633253 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-combined-ca-bundle\") pod \"f1f3d379-c67d-4e38-8796-f784377550de\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.633329 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-scripts\") pod \"f1f3d379-c67d-4e38-8796-f784377550de\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.633407 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/f1f3d379-c67d-4e38-8796-f784377550de-ceph\") pod \"f1f3d379-c67d-4e38-8796-f784377550de\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.633441 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-public-tls-certs\") pod \"f1f3d379-c67d-4e38-8796-f784377550de\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.633618 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djtmh\" (UniqueName: 
\"kubernetes.io/projected/f1f3d379-c67d-4e38-8796-f784377550de-kube-api-access-djtmh\") pod \"f1f3d379-c67d-4e38-8796-f784377550de\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.633676 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"f1f3d379-c67d-4e38-8796-f784377550de\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.633711 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-config-data\") pod \"f1f3d379-c67d-4e38-8796-f784377550de\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.633754 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f1f3d379-c67d-4e38-8796-f784377550de-httpd-run\") pod \"f1f3d379-c67d-4e38-8796-f784377550de\" (UID: \"f1f3d379-c67d-4e38-8796-f784377550de\") " Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.634681 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1f3d379-c67d-4e38-8796-f784377550de-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "f1f3d379-c67d-4e38-8796-f784377550de" (UID: "f1f3d379-c67d-4e38-8796-f784377550de"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.641446 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1f3d379-c67d-4e38-8796-f784377550de-logs" (OuterVolumeSpecName: "logs") pod "f1f3d379-c67d-4e38-8796-f784377550de" (UID: "f1f3d379-c67d-4e38-8796-f784377550de"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.644916 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1f3d379-c67d-4e38-8796-f784377550de-kube-api-access-djtmh" (OuterVolumeSpecName: "kube-api-access-djtmh") pod "f1f3d379-c67d-4e38-8796-f784377550de" (UID: "f1f3d379-c67d-4e38-8796-f784377550de"). InnerVolumeSpecName "kube-api-access-djtmh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.652127 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1f3d379-c67d-4e38-8796-f784377550de-ceph" (OuterVolumeSpecName: "ceph") pod "f1f3d379-c67d-4e38-8796-f784377550de" (UID: "f1f3d379-c67d-4e38-8796-f784377550de"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.663189 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "f1f3d379-c67d-4e38-8796-f784377550de" (UID: "f1f3d379-c67d-4e38-8796-f784377550de"). InnerVolumeSpecName "local-storage09-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.666960 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-scripts" (OuterVolumeSpecName: "scripts") pod "f1f3d379-c67d-4e38-8796-f784377550de" (UID: "f1f3d379-c67d-4e38-8796-f784377550de"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.669074 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f1f3d379-c67d-4e38-8796-f784377550de" (UID: "f1f3d379-c67d-4e38-8796-f784377550de"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.708141 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-config-data" (OuterVolumeSpecName: "config-data") pod "f1f3d379-c67d-4e38-8796-f784377550de" (UID: "f1f3d379-c67d-4e38-8796-f784377550de"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.736343 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.736379 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/f1f3d379-c67d-4e38-8796-f784377550de-ceph\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.736395 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djtmh\" (UniqueName: \"kubernetes.io/projected/f1f3d379-c67d-4e38-8796-f784377550de-kube-api-access-djtmh\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.736636 5039 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.736650 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.736662 5039 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f1f3d379-c67d-4e38-8796-f784377550de-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.736675 5039 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1f3d379-c67d-4e38-8796-f784377550de-logs\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.736686 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.790265 5039 operation_generator.go:917] UnmountDevice succeeded for volume 
"local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.839719 5039 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.926684 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "f1f3d379-c67d-4e38-8796-f784377550de" (UID: "f1f3d379-c67d-4e38-8796-f784377550de"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:29:47 crc kubenswrapper[5039]: I1124 14:29:47.941184 5039 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1f3d379-c67d-4e38-8796-f784377550de-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.059563 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.106851 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.126671 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.148899 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 14:29:48 crc kubenswrapper[5039]: E1124 14:29:48.149539 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1f3d379-c67d-4e38-8796-f784377550de" containerName="glance-httpd" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.149555 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1f3d379-c67d-4e38-8796-f784377550de" containerName="glance-httpd" Nov 24 14:29:48 crc kubenswrapper[5039]: E1124 14:29:48.149574 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1f3d379-c67d-4e38-8796-f784377550de" containerName="glance-log" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.149599 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1f3d379-c67d-4e38-8796-f784377550de" containerName="glance-log" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.149807 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1f3d379-c67d-4e38-8796-f784377550de" containerName="glance-httpd" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.149837 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1f3d379-c67d-4e38-8796-f784377550de" containerName="glance-log" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.151361 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.158231 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.159203 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.161385 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.174368 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 14:29:48 crc kubenswrapper[5039]: W1124 14:29:48.202170 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f1ea0e7_3b9c_4fed_85cc_901484aed56f.slice/crio-7d4672f953736ed1327f80fcfad87943926b9a6c28472cf9f43699581ce10471 WatchSource:0}: Error finding container 7d4672f953736ed1327f80fcfad87943926b9a6c28472cf9f43699581ce10471: Status 404 returned error can't find the container with id 7d4672f953736ed1327f80fcfad87943926b9a6c28472cf9f43699581ce10471 Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.341799 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a93d012-5b3e-4d77-82bb-c38e45fc9dde" path="/var/lib/kubelet/pods/6a93d012-5b3e-4d77-82bb-c38e45fc9dde/volumes" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.343110 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1f3d379-c67d-4e38-8796-f784377550de" path="/var/lib/kubelet/pods/f1f3d379-c67d-4e38-8796-f784377550de/volumes" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.352446 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.352856 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8609b6fd-f97e-4af8-811f-c86e99bf033a-scripts\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.352974 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8609b6fd-f97e-4af8-811f-c86e99bf033a-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.353555 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8609b6fd-f97e-4af8-811f-c86e99bf033a-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.353688 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ceph\" (UniqueName: \"kubernetes.io/projected/8609b6fd-f97e-4af8-811f-c86e99bf033a-ceph\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.353724 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8609b6fd-f97e-4af8-811f-c86e99bf033a-logs\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.353823 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8609b6fd-f97e-4af8-811f-c86e99bf033a-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.353888 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7dsv\" (UniqueName: \"kubernetes.io/projected/8609b6fd-f97e-4af8-811f-c86e99bf033a-kube-api-access-w7dsv\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.353918 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8609b6fd-f97e-4af8-811f-c86e99bf033a-config-data\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.514967 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8609b6fd-f97e-4af8-811f-c86e99bf033a-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.515093 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7dsv\" (UniqueName: \"kubernetes.io/projected/8609b6fd-f97e-4af8-811f-c86e99bf033a-kube-api-access-w7dsv\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.515151 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8609b6fd-f97e-4af8-811f-c86e99bf033a-config-data\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.515217 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.515259 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/8609b6fd-f97e-4af8-811f-c86e99bf033a-scripts\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.515364 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8609b6fd-f97e-4af8-811f-c86e99bf033a-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.515401 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8609b6fd-f97e-4af8-811f-c86e99bf033a-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.515637 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8609b6fd-f97e-4af8-811f-c86e99bf033a-ceph\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.515691 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8609b6fd-f97e-4af8-811f-c86e99bf033a-logs\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.516186 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8609b6fd-f97e-4af8-811f-c86e99bf033a-logs\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.522301 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8609b6fd-f97e-4af8-811f-c86e99bf033a-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.525830 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.532021 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8609b6fd-f97e-4af8-811f-c86e99bf033a-scripts\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.533796 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8609b6fd-f97e-4af8-811f-c86e99bf033a-config-data\") pod \"glance-default-external-api-0\" (UID: 
\"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.537411 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8609b6fd-f97e-4af8-811f-c86e99bf033a-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.545433 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8609b6fd-f97e-4af8-811f-c86e99bf033a-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.550532 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8609b6fd-f97e-4af8-811f-c86e99bf033a-ceph\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:48 crc kubenswrapper[5039]: I1124 14:29:48.693381 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:49 crc kubenswrapper[5039]: I1124 14:29:49.071913 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6f1ea0e7-3b9c-4fed-85cc-901484aed56f","Type":"ContainerStarted","Data":"7d4672f953736ed1327f80fcfad87943926b9a6c28472cf9f43699581ce10471"} Nov 24 14:29:49 crc kubenswrapper[5039]: I1124 14:29:49.279202 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7dsv\" (UniqueName: \"kubernetes.io/projected/8609b6fd-f97e-4af8-811f-c86e99bf033a-kube-api-access-w7dsv\") pod \"glance-default-external-api-0\" (UID: \"8609b6fd-f97e-4af8-811f-c86e99bf033a\") " pod="openstack/glance-default-external-api-0" Nov 24 14:29:49 crc kubenswrapper[5039]: I1124 14:29:49.419714 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 14:29:49 crc kubenswrapper[5039]: I1124 14:29:49.590436 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0" Nov 24 14:29:49 crc kubenswrapper[5039]: I1124 14:29:49.858806 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-volume1-0" Nov 24 14:29:50 crc kubenswrapper[5039]: I1124 14:29:50.092582 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6f1ea0e7-3b9c-4fed-85cc-901484aed56f","Type":"ContainerStarted","Data":"07f7a429f80f7c200950e97a854b7e4fe983593182d8052f78f2a626cc775395"} Nov 24 14:29:50 crc kubenswrapper[5039]: I1124 14:29:50.557823 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-sync-2th7h"] Nov 24 14:29:50 crc kubenswrapper[5039]: I1124 14:29:50.559535 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-sync-2th7h" Nov 24 14:29:50 crc kubenswrapper[5039]: I1124 14:29:50.562910 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-7579r" Nov 24 14:29:50 crc kubenswrapper[5039]: I1124 14:29:50.563144 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Nov 24 14:29:50 crc kubenswrapper[5039]: I1124 14:29:50.568859 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-2th7h"] Nov 24 14:29:50 crc kubenswrapper[5039]: I1124 14:29:50.675353 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42960250-7fd2-4db6-9670-dfbe653c2713-combined-ca-bundle\") pod \"manila-db-sync-2th7h\" (UID: \"42960250-7fd2-4db6-9670-dfbe653c2713\") " pod="openstack/manila-db-sync-2th7h" Nov 24 14:29:50 crc kubenswrapper[5039]: I1124 14:29:50.675755 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/42960250-7fd2-4db6-9670-dfbe653c2713-job-config-data\") pod \"manila-db-sync-2th7h\" (UID: \"42960250-7fd2-4db6-9670-dfbe653c2713\") " pod="openstack/manila-db-sync-2th7h" Nov 24 14:29:50 crc kubenswrapper[5039]: I1124 14:29:50.676073 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjtb8\" (UniqueName: \"kubernetes.io/projected/42960250-7fd2-4db6-9670-dfbe653c2713-kube-api-access-hjtb8\") pod \"manila-db-sync-2th7h\" (UID: \"42960250-7fd2-4db6-9670-dfbe653c2713\") " pod="openstack/manila-db-sync-2th7h" Nov 24 14:29:50 crc kubenswrapper[5039]: I1124 14:29:50.676272 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42960250-7fd2-4db6-9670-dfbe653c2713-config-data\") pod \"manila-db-sync-2th7h\" (UID: \"42960250-7fd2-4db6-9670-dfbe653c2713\") " pod="openstack/manila-db-sync-2th7h" Nov 24 14:29:50 crc kubenswrapper[5039]: I1124 14:29:50.780036 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/42960250-7fd2-4db6-9670-dfbe653c2713-job-config-data\") pod \"manila-db-sync-2th7h\" (UID: \"42960250-7fd2-4db6-9670-dfbe653c2713\") " pod="openstack/manila-db-sync-2th7h" Nov 24 14:29:50 crc kubenswrapper[5039]: I1124 14:29:50.780164 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjtb8\" (UniqueName: \"kubernetes.io/projected/42960250-7fd2-4db6-9670-dfbe653c2713-kube-api-access-hjtb8\") pod \"manila-db-sync-2th7h\" (UID: \"42960250-7fd2-4db6-9670-dfbe653c2713\") " pod="openstack/manila-db-sync-2th7h" Nov 24 14:29:50 crc kubenswrapper[5039]: I1124 14:29:50.780246 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42960250-7fd2-4db6-9670-dfbe653c2713-config-data\") pod \"manila-db-sync-2th7h\" (UID: \"42960250-7fd2-4db6-9670-dfbe653c2713\") " pod="openstack/manila-db-sync-2th7h" Nov 24 14:29:50 crc kubenswrapper[5039]: I1124 14:29:50.780301 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42960250-7fd2-4db6-9670-dfbe653c2713-combined-ca-bundle\") pod \"manila-db-sync-2th7h\" (UID: 
\"42960250-7fd2-4db6-9670-dfbe653c2713\") " pod="openstack/manila-db-sync-2th7h" Nov 24 14:29:50 crc kubenswrapper[5039]: I1124 14:29:50.786813 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/42960250-7fd2-4db6-9670-dfbe653c2713-job-config-data\") pod \"manila-db-sync-2th7h\" (UID: \"42960250-7fd2-4db6-9670-dfbe653c2713\") " pod="openstack/manila-db-sync-2th7h" Nov 24 14:29:50 crc kubenswrapper[5039]: I1124 14:29:50.787270 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42960250-7fd2-4db6-9670-dfbe653c2713-combined-ca-bundle\") pod \"manila-db-sync-2th7h\" (UID: \"42960250-7fd2-4db6-9670-dfbe653c2713\") " pod="openstack/manila-db-sync-2th7h" Nov 24 14:29:50 crc kubenswrapper[5039]: I1124 14:29:50.795728 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42960250-7fd2-4db6-9670-dfbe653c2713-config-data\") pod \"manila-db-sync-2th7h\" (UID: \"42960250-7fd2-4db6-9670-dfbe653c2713\") " pod="openstack/manila-db-sync-2th7h" Nov 24 14:29:50 crc kubenswrapper[5039]: I1124 14:29:50.801238 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjtb8\" (UniqueName: \"kubernetes.io/projected/42960250-7fd2-4db6-9670-dfbe653c2713-kube-api-access-hjtb8\") pod \"manila-db-sync-2th7h\" (UID: \"42960250-7fd2-4db6-9670-dfbe653c2713\") " pod="openstack/manila-db-sync-2th7h" Nov 24 14:29:50 crc kubenswrapper[5039]: I1124 14:29:50.883125 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-2th7h" Nov 24 14:29:54 crc kubenswrapper[5039]: I1124 14:29:54.859267 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0" Nov 24 14:29:55 crc kubenswrapper[5039]: I1124 14:29:55.153488 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 14:29:55 crc kubenswrapper[5039]: W1124 14:29:55.163202 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8609b6fd_f97e_4af8_811f_c86e99bf033a.slice/crio-eebe46d8279769b74cb260b912049a018ec8ef7a33086c5a5caa9bd53a23ea91 WatchSource:0}: Error finding container eebe46d8279769b74cb260b912049a018ec8ef7a33086c5a5caa9bd53a23ea91: Status 404 returned error can't find the container with id eebe46d8279769b74cb260b912049a018ec8ef7a33086c5a5caa9bd53a23ea91 Nov 24 14:29:55 crc kubenswrapper[5039]: I1124 14:29:55.184494 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-76c7f554dc-ctpbw" event={"ID":"9f956cc7-2ab6-4818-8d36-09cbd169b9b2","Type":"ContainerStarted","Data":"6166b96488451436dcc3cdb61d575d0c3dff77f100ae949a59254bf6a559ba52"} Nov 24 14:29:55 crc kubenswrapper[5039]: I1124 14:29:55.184559 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-76c7f554dc-ctpbw" event={"ID":"9f956cc7-2ab6-4818-8d36-09cbd169b9b2","Type":"ContainerStarted","Data":"9894b19bd17da2eda60b53363718f20a91bc4721b85a456121e60cfe5a83ae30"} Nov 24 14:29:55 crc kubenswrapper[5039]: I1124 14:29:55.184736 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-76c7f554dc-ctpbw" podUID="9f956cc7-2ab6-4818-8d36-09cbd169b9b2" containerName="horizon" 
containerID="cri-o://6166b96488451436dcc3cdb61d575d0c3dff77f100ae949a59254bf6a559ba52" gracePeriod=30 Nov 24 14:29:55 crc kubenswrapper[5039]: I1124 14:29:55.184710 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-76c7f554dc-ctpbw" podUID="9f956cc7-2ab6-4818-8d36-09cbd169b9b2" containerName="horizon-log" containerID="cri-o://9894b19bd17da2eda60b53363718f20a91bc4721b85a456121e60cfe5a83ae30" gracePeriod=30 Nov 24 14:29:55 crc kubenswrapper[5039]: I1124 14:29:55.195081 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-fd8bc7b7f-2mcnl" event={"ID":"3943960b-1e53-413a-9dd9-505fe98db72d","Type":"ContainerStarted","Data":"b29cd16920bf897136ddba32f54dc338246d34d667368df3aa562dc9041032ae"} Nov 24 14:29:55 crc kubenswrapper[5039]: I1124 14:29:55.195144 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-fd8bc7b7f-2mcnl" event={"ID":"3943960b-1e53-413a-9dd9-505fe98db72d","Type":"ContainerStarted","Data":"c24e8960d0a07ce16a6321bb797ed71d3f88109b75919a561c453705b98ac0af"} Nov 24 14:29:55 crc kubenswrapper[5039]: I1124 14:29:55.195314 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-fd8bc7b7f-2mcnl" podUID="3943960b-1e53-413a-9dd9-505fe98db72d" containerName="horizon-log" containerID="cri-o://c24e8960d0a07ce16a6321bb797ed71d3f88109b75919a561c453705b98ac0af" gracePeriod=30 Nov 24 14:29:55 crc kubenswrapper[5039]: I1124 14:29:55.195397 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-fd8bc7b7f-2mcnl" podUID="3943960b-1e53-413a-9dd9-505fe98db72d" containerName="horizon" containerID="cri-o://b29cd16920bf897136ddba32f54dc338246d34d667368df3aa562dc9041032ae" gracePeriod=30 Nov 24 14:29:55 crc kubenswrapper[5039]: I1124 14:29:55.200078 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c5b658bc4-625q2" event={"ID":"ee231063-13a0-4a14-9864-362a8459b8e7","Type":"ContainerStarted","Data":"d49ed4e675c018d6675328a5efa7109f8ba13218f9a5be6b7962ff27a851bccd"} Nov 24 14:29:55 crc kubenswrapper[5039]: I1124 14:29:55.212281 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-76c7f554dc-ctpbw" podStartSLOduration=2.3430009529999998 podStartE2EDuration="15.212257529s" podCreationTimestamp="2025-11-24 14:29:40 +0000 UTC" firstStartedPulling="2025-11-24 14:29:41.625203945 +0000 UTC m=+4294.064328445" lastFinishedPulling="2025-11-24 14:29:54.494460521 +0000 UTC m=+4306.933585021" observedRunningTime="2025-11-24 14:29:55.2094379 +0000 UTC m=+4307.648562390" watchObservedRunningTime="2025-11-24 14:29:55.212257529 +0000 UTC m=+4307.651382029" Nov 24 14:29:55 crc kubenswrapper[5039]: I1124 14:29:55.248954 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6fbc854bcb-ssv8l" event={"ID":"4a0e58d4-73eb-4baf-8698-4c67b711e1a8","Type":"ContainerStarted","Data":"c7710e75f483767860bf9e5253736acb4848ba472fae6022082e5cff2d9fa20f"} Nov 24 14:29:55 crc kubenswrapper[5039]: I1124 14:29:55.252699 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-fd8bc7b7f-2mcnl" podStartSLOduration=2.270848638 podStartE2EDuration="15.252683267s" podCreationTimestamp="2025-11-24 14:29:40 +0000 UTC" firstStartedPulling="2025-11-24 14:29:41.498488057 +0000 UTC m=+4293.937612557" lastFinishedPulling="2025-11-24 14:29:54.480322686 +0000 UTC m=+4306.919447186" observedRunningTime="2025-11-24 14:29:55.235386674 +0000 UTC 
m=+4307.674511174" watchObservedRunningTime="2025-11-24 14:29:55.252683267 +0000 UTC m=+4307.691807767" Nov 24 14:29:55 crc kubenswrapper[5039]: I1124 14:29:55.253656 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-2th7h"] Nov 24 14:29:56 crc kubenswrapper[5039]: I1124 14:29:56.286779 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6f1ea0e7-3b9c-4fed-85cc-901484aed56f","Type":"ContainerStarted","Data":"f241f198ad19c58c9831f9eba22d35f7cdc391fbeb114228319ceb47e8e3be46"} Nov 24 14:29:56 crc kubenswrapper[5039]: I1124 14:29:56.294596 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-2th7h" event={"ID":"42960250-7fd2-4db6-9670-dfbe653c2713","Type":"ContainerStarted","Data":"cf026da70a0109e4f9030a1bc9922f80041f80a796bed17b0fbd9bb6e7cd33a1"} Nov 24 14:29:56 crc kubenswrapper[5039]: I1124 14:29:56.299374 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c5b658bc4-625q2" event={"ID":"ee231063-13a0-4a14-9864-362a8459b8e7","Type":"ContainerStarted","Data":"c83ec4fbe52dbd8b6865ecf997689e3c1746c09451f88b0f608e7cd6c1b6502d"} Nov 24 14:29:56 crc kubenswrapper[5039]: I1124 14:29:56.306081 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6fbc854bcb-ssv8l" event={"ID":"4a0e58d4-73eb-4baf-8698-4c67b711e1a8","Type":"ContainerStarted","Data":"80b8c44cb23587bffbf7e9ef28fe08ab4ecf2f5e92e68c998782a09a8605dbc6"} Nov 24 14:29:56 crc kubenswrapper[5039]: I1124 14:29:56.332348 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8609b6fd-f97e-4af8-811f-c86e99bf033a","Type":"ContainerStarted","Data":"fe5bd674a58529e57e8dea4834505e64ed8275a662b61727975176afd6307d5a"} Nov 24 14:29:56 crc kubenswrapper[5039]: I1124 14:29:56.332390 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8609b6fd-f97e-4af8-811f-c86e99bf033a","Type":"ContainerStarted","Data":"eebe46d8279769b74cb260b912049a018ec8ef7a33086c5a5caa9bd53a23ea91"} Nov 24 14:29:56 crc kubenswrapper[5039]: I1124 14:29:56.362126 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=10.362105737 podStartE2EDuration="10.362105737s" podCreationTimestamp="2025-11-24 14:29:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 14:29:56.31887644 +0000 UTC m=+4308.758000940" watchObservedRunningTime="2025-11-24 14:29:56.362105737 +0000 UTC m=+4308.801230237" Nov 24 14:29:56 crc kubenswrapper[5039]: I1124 14:29:56.377405 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-6c5b658bc4-625q2" podStartSLOduration=5.048642569 podStartE2EDuration="13.377387961s" podCreationTimestamp="2025-11-24 14:29:43 +0000 UTC" firstStartedPulling="2025-11-24 14:29:46.167726609 +0000 UTC m=+4298.606851109" lastFinishedPulling="2025-11-24 14:29:54.496472001 +0000 UTC m=+4306.935596501" observedRunningTime="2025-11-24 14:29:56.342775735 +0000 UTC m=+4308.781900235" watchObservedRunningTime="2025-11-24 14:29:56.377387961 +0000 UTC m=+4308.816512461" Nov 24 14:29:56 crc kubenswrapper[5039]: I1124 14:29:56.395120 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-6fbc854bcb-ssv8l" podStartSLOduration=4.985322002 
podStartE2EDuration="13.395100744s" podCreationTimestamp="2025-11-24 14:29:43 +0000 UTC" firstStartedPulling="2025-11-24 14:29:46.086093864 +0000 UTC m=+4298.525218364" lastFinishedPulling="2025-11-24 14:29:54.495872606 +0000 UTC m=+4306.934997106" observedRunningTime="2025-11-24 14:29:56.36960073 +0000 UTC m=+4308.808725240" watchObservedRunningTime="2025-11-24 14:29:56.395100744 +0000 UTC m=+4308.834225234" Nov 24 14:29:56 crc kubenswrapper[5039]: I1124 14:29:56.861205 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 24 14:29:56 crc kubenswrapper[5039]: I1124 14:29:56.861267 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 24 14:29:56 crc kubenswrapper[5039]: I1124 14:29:56.904391 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 24 14:29:56 crc kubenswrapper[5039]: I1124 14:29:56.914040 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 24 14:29:57 crc kubenswrapper[5039]: I1124 14:29:57.338726 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8609b6fd-f97e-4af8-811f-c86e99bf033a","Type":"ContainerStarted","Data":"f011ca2214c506096b2d082cc44ebdb9323ccbbdad540c0341cb6275b9f89804"} Nov 24 14:29:57 crc kubenswrapper[5039]: I1124 14:29:57.339200 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 24 14:29:57 crc kubenswrapper[5039]: I1124 14:29:57.342153 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 24 14:29:57 crc kubenswrapper[5039]: I1124 14:29:57.359116 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=9.359090259 podStartE2EDuration="9.359090259s" podCreationTimestamp="2025-11-24 14:29:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 14:29:57.357285215 +0000 UTC m=+4309.796409725" watchObservedRunningTime="2025-11-24 14:29:57.359090259 +0000 UTC m=+4309.798214759" Nov 24 14:29:59 crc kubenswrapper[5039]: I1124 14:29:59.308457 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:29:59 crc kubenswrapper[5039]: E1124 14:29:59.309037 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:29:59 crc kubenswrapper[5039]: I1124 14:29:59.362746 5039 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 14:29:59 crc kubenswrapper[5039]: I1124 14:29:59.420139 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 24 14:29:59 crc kubenswrapper[5039]: I1124 14:29:59.421219 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/glance-default-external-api-0" Nov 24 14:29:59 crc kubenswrapper[5039]: I1124 14:29:59.481390 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 24 14:29:59 crc kubenswrapper[5039]: I1124 14:29:59.481808 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 24 14:30:00 crc kubenswrapper[5039]: I1124 14:30:00.151576 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg"] Nov 24 14:30:00 crc kubenswrapper[5039]: I1124 14:30:00.153606 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg" Nov 24 14:30:00 crc kubenswrapper[5039]: I1124 14:30:00.155802 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 24 14:30:00 crc kubenswrapper[5039]: I1124 14:30:00.157719 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 24 14:30:00 crc kubenswrapper[5039]: I1124 14:30:00.161514 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg"] Nov 24 14:30:00 crc kubenswrapper[5039]: I1124 14:30:00.258807 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szchr\" (UniqueName: \"kubernetes.io/projected/5b38e1df-2624-4bb8-8656-51b4f9be00a0-kube-api-access-szchr\") pod \"collect-profiles-29399910-jzhfg\" (UID: \"5b38e1df-2624-4bb8-8656-51b4f9be00a0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg" Nov 24 14:30:00 crc kubenswrapper[5039]: I1124 14:30:00.259428 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5b38e1df-2624-4bb8-8656-51b4f9be00a0-secret-volume\") pod \"collect-profiles-29399910-jzhfg\" (UID: \"5b38e1df-2624-4bb8-8656-51b4f9be00a0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg" Nov 24 14:30:00 crc kubenswrapper[5039]: I1124 14:30:00.259495 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5b38e1df-2624-4bb8-8656-51b4f9be00a0-config-volume\") pod \"collect-profiles-29399910-jzhfg\" (UID: \"5b38e1df-2624-4bb8-8656-51b4f9be00a0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg" Nov 24 14:30:00 crc kubenswrapper[5039]: I1124 14:30:00.362101 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5b38e1df-2624-4bb8-8656-51b4f9be00a0-secret-volume\") pod \"collect-profiles-29399910-jzhfg\" (UID: \"5b38e1df-2624-4bb8-8656-51b4f9be00a0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg" Nov 24 14:30:00 crc kubenswrapper[5039]: I1124 14:30:00.362151 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5b38e1df-2624-4bb8-8656-51b4f9be00a0-config-volume\") pod \"collect-profiles-29399910-jzhfg\" (UID: \"5b38e1df-2624-4bb8-8656-51b4f9be00a0\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg" Nov 24 14:30:00 crc kubenswrapper[5039]: I1124 14:30:00.362224 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szchr\" (UniqueName: \"kubernetes.io/projected/5b38e1df-2624-4bb8-8656-51b4f9be00a0-kube-api-access-szchr\") pod \"collect-profiles-29399910-jzhfg\" (UID: \"5b38e1df-2624-4bb8-8656-51b4f9be00a0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg" Nov 24 14:30:00 crc kubenswrapper[5039]: I1124 14:30:00.363976 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5b38e1df-2624-4bb8-8656-51b4f9be00a0-config-volume\") pod \"collect-profiles-29399910-jzhfg\" (UID: \"5b38e1df-2624-4bb8-8656-51b4f9be00a0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg" Nov 24 14:30:00 crc kubenswrapper[5039]: I1124 14:30:00.372849 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 24 14:30:00 crc kubenswrapper[5039]: I1124 14:30:00.372892 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 24 14:30:00 crc kubenswrapper[5039]: I1124 14:30:00.388841 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5b38e1df-2624-4bb8-8656-51b4f9be00a0-secret-volume\") pod \"collect-profiles-29399910-jzhfg\" (UID: \"5b38e1df-2624-4bb8-8656-51b4f9be00a0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg" Nov 24 14:30:00 crc kubenswrapper[5039]: I1124 14:30:00.389399 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szchr\" (UniqueName: \"kubernetes.io/projected/5b38e1df-2624-4bb8-8656-51b4f9be00a0-kube-api-access-szchr\") pod \"collect-profiles-29399910-jzhfg\" (UID: \"5b38e1df-2624-4bb8-8656-51b4f9be00a0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg" Nov 24 14:30:00 crc kubenswrapper[5039]: I1124 14:30:00.484393 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg" Nov 24 14:30:00 crc kubenswrapper[5039]: I1124 14:30:00.517524 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-fd8bc7b7f-2mcnl" Nov 24 14:30:00 crc kubenswrapper[5039]: I1124 14:30:00.743964 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-76c7f554dc-ctpbw" Nov 24 14:30:03 crc kubenswrapper[5039]: I1124 14:30:03.243282 5039 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 14:30:03 crc kubenswrapper[5039]: I1124 14:30:03.334544 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg"] Nov 24 14:30:03 crc kubenswrapper[5039]: I1124 14:30:03.970224 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:30:03 crc kubenswrapper[5039]: I1124 14:30:03.970640 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:30:04 crc kubenswrapper[5039]: I1124 14:30:04.133294 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:30:04 crc kubenswrapper[5039]: I1124 14:30:04.133707 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:30:04 crc kubenswrapper[5039]: I1124 14:30:04.254969 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-2th7h" event={"ID":"42960250-7fd2-4db6-9670-dfbe653c2713","Type":"ContainerStarted","Data":"e097a0bd56c67847fa002a9d01e01f3945f804fe690abdda16c508927ebb3b10"} Nov 24 14:30:04 crc kubenswrapper[5039]: I1124 14:30:04.259203 5039 generic.go:334] "Generic (PLEG): container finished" podID="5b38e1df-2624-4bb8-8656-51b4f9be00a0" containerID="9456d55cdb9d8455b56028155a6448096807eeb277d65cfe469cc936bff2c229" exitCode=0 Nov 24 14:30:04 crc kubenswrapper[5039]: I1124 14:30:04.260322 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg" event={"ID":"5b38e1df-2624-4bb8-8656-51b4f9be00a0","Type":"ContainerDied","Data":"9456d55cdb9d8455b56028155a6448096807eeb277d65cfe469cc936bff2c229"} Nov 24 14:30:04 crc kubenswrapper[5039]: I1124 14:30:04.260360 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg" event={"ID":"5b38e1df-2624-4bb8-8656-51b4f9be00a0","Type":"ContainerStarted","Data":"fa5c8697662ccfabac1bf3dcff59024d83d6b6de0a7ecba2f478142137b9491c"} Nov 24 14:30:04 crc kubenswrapper[5039]: I1124 14:30:04.276232 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-sync-2th7h" podStartSLOduration=7.672377508 podStartE2EDuration="14.276216373s" podCreationTimestamp="2025-11-24 14:29:50 +0000 UTC" firstStartedPulling="2025-11-24 14:29:55.27612473 +0000 UTC m=+4307.715249230" lastFinishedPulling="2025-11-24 14:30:01.879963595 +0000 UTC m=+4314.319088095" observedRunningTime="2025-11-24 14:30:04.274781367 +0000 UTC m=+4316.713905867" watchObservedRunningTime="2025-11-24 14:30:04.276216373 +0000 UTC m=+4316.715340873" Nov 24 14:30:04 crc kubenswrapper[5039]: I1124 14:30:04.588207 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 24 14:30:04 
crc kubenswrapper[5039]: I1124 14:30:04.712933 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 24 14:30:04 crc kubenswrapper[5039]: I1124 14:30:04.713119 5039 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 14:30:04 crc kubenswrapper[5039]: I1124 14:30:04.933290 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 24 14:30:05 crc kubenswrapper[5039]: I1124 14:30:05.795703 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg" Nov 24 14:30:05 crc kubenswrapper[5039]: I1124 14:30:05.900812 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5b38e1df-2624-4bb8-8656-51b4f9be00a0-config-volume\") pod \"5b38e1df-2624-4bb8-8656-51b4f9be00a0\" (UID: \"5b38e1df-2624-4bb8-8656-51b4f9be00a0\") " Nov 24 14:30:05 crc kubenswrapper[5039]: I1124 14:30:05.901029 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5b38e1df-2624-4bb8-8656-51b4f9be00a0-secret-volume\") pod \"5b38e1df-2624-4bb8-8656-51b4f9be00a0\" (UID: \"5b38e1df-2624-4bb8-8656-51b4f9be00a0\") " Nov 24 14:30:05 crc kubenswrapper[5039]: I1124 14:30:05.901084 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-szchr\" (UniqueName: \"kubernetes.io/projected/5b38e1df-2624-4bb8-8656-51b4f9be00a0-kube-api-access-szchr\") pod \"5b38e1df-2624-4bb8-8656-51b4f9be00a0\" (UID: \"5b38e1df-2624-4bb8-8656-51b4f9be00a0\") " Nov 24 14:30:05 crc kubenswrapper[5039]: I1124 14:30:05.901838 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b38e1df-2624-4bb8-8656-51b4f9be00a0-config-volume" (OuterVolumeSpecName: "config-volume") pod "5b38e1df-2624-4bb8-8656-51b4f9be00a0" (UID: "5b38e1df-2624-4bb8-8656-51b4f9be00a0"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 14:30:05 crc kubenswrapper[5039]: I1124 14:30:05.902287 5039 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5b38e1df-2624-4bb8-8656-51b4f9be00a0-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:05 crc kubenswrapper[5039]: I1124 14:30:05.907737 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b38e1df-2624-4bb8-8656-51b4f9be00a0-kube-api-access-szchr" (OuterVolumeSpecName: "kube-api-access-szchr") pod "5b38e1df-2624-4bb8-8656-51b4f9be00a0" (UID: "5b38e1df-2624-4bb8-8656-51b4f9be00a0"). InnerVolumeSpecName "kube-api-access-szchr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:30:05 crc kubenswrapper[5039]: I1124 14:30:05.916313 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b38e1df-2624-4bb8-8656-51b4f9be00a0-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5b38e1df-2624-4bb8-8656-51b4f9be00a0" (UID: "5b38e1df-2624-4bb8-8656-51b4f9be00a0"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:06 crc kubenswrapper[5039]: I1124 14:30:06.004090 5039 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5b38e1df-2624-4bb8-8656-51b4f9be00a0-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:06 crc kubenswrapper[5039]: I1124 14:30:06.004144 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szchr\" (UniqueName: \"kubernetes.io/projected/5b38e1df-2624-4bb8-8656-51b4f9be00a0-kube-api-access-szchr\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:06 crc kubenswrapper[5039]: I1124 14:30:06.280926 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg" event={"ID":"5b38e1df-2624-4bb8-8656-51b4f9be00a0","Type":"ContainerDied","Data":"fa5c8697662ccfabac1bf3dcff59024d83d6b6de0a7ecba2f478142137b9491c"} Nov 24 14:30:06 crc kubenswrapper[5039]: I1124 14:30:06.280973 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fa5c8697662ccfabac1bf3dcff59024d83d6b6de0a7ecba2f478142137b9491c" Nov 24 14:30:06 crc kubenswrapper[5039]: I1124 14:30:06.281056 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg" Nov 24 14:30:06 crc kubenswrapper[5039]: I1124 14:30:06.859007 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 24 14:30:06 crc kubenswrapper[5039]: I1124 14:30:06.879972 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q"] Nov 24 14:30:06 crc kubenswrapper[5039]: I1124 14:30:06.891993 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399865-rzs5q"] Nov 24 14:30:08 crc kubenswrapper[5039]: I1124 14:30:08.321103 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2298360a-7895-4303-a3f8-a32cfbe731c9" path="/var/lib/kubelet/pods/2298360a-7895-4303-a3f8-a32cfbe731c9/volumes" Nov 24 14:30:12 crc kubenswrapper[5039]: I1124 14:30:12.307411 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:30:12 crc kubenswrapper[5039]: E1124 14:30:12.308528 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:30:13 crc kubenswrapper[5039]: I1124 14:30:13.976866 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6c5b658bc4-625q2" podUID="ee231063-13a0-4a14-9864-362a8459b8e7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.69:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.69:8443: connect: connection refused" Nov 24 14:30:14 crc kubenswrapper[5039]: I1124 14:30:14.136467 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6fbc854bcb-ssv8l" podUID="4a0e58d4-73eb-4baf-8698-4c67b711e1a8" containerName="horizon" probeResult="failure" output="Get 
\"https://10.217.1.70:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.70:8443: connect: connection refused" Nov 24 14:30:14 crc kubenswrapper[5039]: I1124 14:30:14.374652 5039 generic.go:334] "Generic (PLEG): container finished" podID="42960250-7fd2-4db6-9670-dfbe653c2713" containerID="e097a0bd56c67847fa002a9d01e01f3945f804fe690abdda16c508927ebb3b10" exitCode=0 Nov 24 14:30:14 crc kubenswrapper[5039]: I1124 14:30:14.374697 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-2th7h" event={"ID":"42960250-7fd2-4db6-9670-dfbe653c2713","Type":"ContainerDied","Data":"e097a0bd56c67847fa002a9d01e01f3945f804fe690abdda16c508927ebb3b10"} Nov 24 14:30:15 crc kubenswrapper[5039]: I1124 14:30:15.984590 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-2th7h" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.057127 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/42960250-7fd2-4db6-9670-dfbe653c2713-job-config-data\") pod \"42960250-7fd2-4db6-9670-dfbe653c2713\" (UID: \"42960250-7fd2-4db6-9670-dfbe653c2713\") " Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.057263 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42960250-7fd2-4db6-9670-dfbe653c2713-config-data\") pod \"42960250-7fd2-4db6-9670-dfbe653c2713\" (UID: \"42960250-7fd2-4db6-9670-dfbe653c2713\") " Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.057309 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjtb8\" (UniqueName: \"kubernetes.io/projected/42960250-7fd2-4db6-9670-dfbe653c2713-kube-api-access-hjtb8\") pod \"42960250-7fd2-4db6-9670-dfbe653c2713\" (UID: \"42960250-7fd2-4db6-9670-dfbe653c2713\") " Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.057394 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42960250-7fd2-4db6-9670-dfbe653c2713-combined-ca-bundle\") pod \"42960250-7fd2-4db6-9670-dfbe653c2713\" (UID: \"42960250-7fd2-4db6-9670-dfbe653c2713\") " Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.066812 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42960250-7fd2-4db6-9670-dfbe653c2713-kube-api-access-hjtb8" (OuterVolumeSpecName: "kube-api-access-hjtb8") pod "42960250-7fd2-4db6-9670-dfbe653c2713" (UID: "42960250-7fd2-4db6-9670-dfbe653c2713"). InnerVolumeSpecName "kube-api-access-hjtb8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.070369 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42960250-7fd2-4db6-9670-dfbe653c2713-config-data" (OuterVolumeSpecName: "config-data") pod "42960250-7fd2-4db6-9670-dfbe653c2713" (UID: "42960250-7fd2-4db6-9670-dfbe653c2713"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.092751 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42960250-7fd2-4db6-9670-dfbe653c2713-job-config-data" (OuterVolumeSpecName: "job-config-data") pod "42960250-7fd2-4db6-9670-dfbe653c2713" (UID: "42960250-7fd2-4db6-9670-dfbe653c2713"). 
InnerVolumeSpecName "job-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.103011 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42960250-7fd2-4db6-9670-dfbe653c2713-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "42960250-7fd2-4db6-9670-dfbe653c2713" (UID: "42960250-7fd2-4db6-9670-dfbe653c2713"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.160967 5039 reconciler_common.go:293] "Volume detached for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/42960250-7fd2-4db6-9670-dfbe653c2713-job-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.161034 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42960250-7fd2-4db6-9670-dfbe653c2713-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.161047 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjtb8\" (UniqueName: \"kubernetes.io/projected/42960250-7fd2-4db6-9670-dfbe653c2713-kube-api-access-hjtb8\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.161079 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42960250-7fd2-4db6-9670-dfbe653c2713-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.394588 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-2th7h" event={"ID":"42960250-7fd2-4db6-9670-dfbe653c2713","Type":"ContainerDied","Data":"cf026da70a0109e4f9030a1bc9922f80041f80a796bed17b0fbd9bb6e7cd33a1"} Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.394630 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cf026da70a0109e4f9030a1bc9922f80041f80a796bed17b0fbd9bb6e7cd33a1" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.394678 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-sync-2th7h" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.715205 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"] Nov 24 14:30:16 crc kubenswrapper[5039]: E1124 14:30:16.715844 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b38e1df-2624-4bb8-8656-51b4f9be00a0" containerName="collect-profiles" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.715859 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b38e1df-2624-4bb8-8656-51b4f9be00a0" containerName="collect-profiles" Nov 24 14:30:16 crc kubenswrapper[5039]: E1124 14:30:16.715880 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42960250-7fd2-4db6-9670-dfbe653c2713" containerName="manila-db-sync" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.715886 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="42960250-7fd2-4db6-9670-dfbe653c2713" containerName="manila-db-sync" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.716103 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="42960250-7fd2-4db6-9670-dfbe653c2713" containerName="manila-db-sync" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.720060 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b38e1df-2624-4bb8-8656-51b4f9be00a0" containerName="collect-profiles" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.721258 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.724282 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-7579r" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.725816 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scripts" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.726133 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.731144 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.735581 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"] Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.738026 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-share-share1-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.745986 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.753103 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.780268 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-scripts\") pod \"manila-scheduler-0\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.780356 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.780398 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-scripts\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.780433 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5prvk\" (UniqueName: \"kubernetes.io/projected/49c0901c-2c86-40c0-8f18-56b111088afa-kube-api-access-5prvk\") pod \"manila-scheduler-0\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.780460 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.780482 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-ceph\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.780545 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.780573 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-config-data\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.780635 5039 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fc9kh\" (UniqueName: \"kubernetes.io/projected/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-kube-api-access-fc9kh\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.780689 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.780716 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.780735 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-config-data\") pod \"manila-scheduler-0\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.780773 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/49c0901c-2c86-40c0-8f18-56b111088afa-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.780862 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.806375 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.883389 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-scripts\") pod \"manila-scheduler-0\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.883492 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.883554 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-scripts\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:16 
crc kubenswrapper[5039]: I1124 14:30:16.883590 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5prvk\" (UniqueName: \"kubernetes.io/projected/49c0901c-2c86-40c0-8f18-56b111088afa-kube-api-access-5prvk\") pod \"manila-scheduler-0\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.883618 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.883636 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-ceph\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.883675 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.883708 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-config-data\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.883770 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fc9kh\" (UniqueName: \"kubernetes.io/projected/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-kube-api-access-fc9kh\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.883820 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.883844 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.883864 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-config-data\") pod \"manila-scheduler-0\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.883895 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/49c0901c-2c86-40c0-8f18-56b111088afa-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.883979 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.885688 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.896593 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-74cfff99f-ptjrg"] Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.898985 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.904906 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/49c0901c-2c86-40c0-8f18-56b111088afa-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.907362 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.987955 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f0ced711-f251-4bc4-b59c-4955f950f20d-dns-swift-storage-0\") pod \"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:16 crc kubenswrapper[5039]: I1124 14:30:16.988000 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f0ced711-f251-4bc4-b59c-4955f950f20d-openstack-edpm-ipam\") pod \"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:16.988044 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f0ced711-f251-4bc4-b59c-4955f950f20d-ovsdbserver-sb\") pod \"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:16.988069 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0ced711-f251-4bc4-b59c-4955f950f20d-ovsdbserver-nb\") pod 
\"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:16.988214 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0ced711-f251-4bc4-b59c-4955f950f20d-dns-svc\") pod \"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:16.988256 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnpwl\" (UniqueName: \"kubernetes.io/projected/f0ced711-f251-4bc4-b59c-4955f950f20d-kube-api-access-pnpwl\") pod \"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:16.988299 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0ced711-f251-4bc4-b59c-4955f950f20d-config\") pod \"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:16.988412 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74cfff99f-ptjrg"] Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.092834 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0ced711-f251-4bc4-b59c-4955f950f20d-dns-svc\") pod \"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.092897 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnpwl\" (UniqueName: \"kubernetes.io/projected/f0ced711-f251-4bc4-b59c-4955f950f20d-kube-api-access-pnpwl\") pod \"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.092938 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0ced711-f251-4bc4-b59c-4955f950f20d-config\") pod \"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.093007 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f0ced711-f251-4bc4-b59c-4955f950f20d-dns-swift-storage-0\") pod \"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.093026 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f0ced711-f251-4bc4-b59c-4955f950f20d-openstack-edpm-ipam\") pod \"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.093063 5039 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f0ced711-f251-4bc4-b59c-4955f950f20d-ovsdbserver-sb\") pod \"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.093084 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0ced711-f251-4bc4-b59c-4955f950f20d-ovsdbserver-nb\") pod \"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.094174 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0ced711-f251-4bc4-b59c-4955f950f20d-ovsdbserver-nb\") pod \"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.094206 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0ced711-f251-4bc4-b59c-4955f950f20d-dns-svc\") pod \"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.095326 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0ced711-f251-4bc4-b59c-4955f950f20d-config\") pod \"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.095985 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f0ced711-f251-4bc4-b59c-4955f950f20d-dns-swift-storage-0\") pod \"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.096727 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f0ced711-f251-4bc4-b59c-4955f950f20d-ovsdbserver-sb\") pod \"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.096727 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f0ced711-f251-4bc4-b59c-4955f950f20d-openstack-edpm-ipam\") pod \"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.165332 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"] Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.167355 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.169989 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.227588 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.302138 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f740b81a-80b8-462b-a303-3804b01ac8fc-logs\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.302224 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-scripts\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.302299 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbplq\" (UniqueName: \"kubernetes.io/projected/f740b81a-80b8-462b-a303-3804b01ac8fc-kube-api-access-xbplq\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.302421 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f740b81a-80b8-462b-a303-3804b01ac8fc-etc-machine-id\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.302485 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-config-data-custom\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.302561 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-config-data\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.302618 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.404975 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f740b81a-80b8-462b-a303-3804b01ac8fc-etc-machine-id\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.405057 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-config-data-custom\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.405118 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f740b81a-80b8-462b-a303-3804b01ac8fc-etc-machine-id\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.405131 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-config-data\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.405225 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.405381 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f740b81a-80b8-462b-a303-3804b01ac8fc-logs\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.405445 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-scripts\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.405522 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbplq\" (UniqueName: \"kubernetes.io/projected/f740b81a-80b8-462b-a303-3804b01ac8fc-kube-api-access-xbplq\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.406083 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f740b81a-80b8-462b-a303-3804b01ac8fc-logs\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.479647 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.483971 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.484361 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.484558 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-scripts\") pod \"manila-scheduler-0\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.485009 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-ceph\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.485037 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-config-data\") pod \"manila-scheduler-0\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.485454 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-scripts\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.490209 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-config-data\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.490365 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fc9kh\" (UniqueName: \"kubernetes.io/projected/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-kube-api-access-fc9kh\") pod \"manila-share-share1-0\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.490788 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5prvk\" (UniqueName: \"kubernetes.io/projected/49c0901c-2c86-40c0-8f18-56b111088afa-kube-api-access-5prvk\") pod \"manila-scheduler-0\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.492164 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-scripts\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.492186 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnpwl\" (UniqueName: \"kubernetes.io/projected/f0ced711-f251-4bc4-b59c-4955f950f20d-kube-api-access-pnpwl\") pod \"dnsmasq-dns-74cfff99f-ptjrg\" (UID: \"f0ced711-f251-4bc4-b59c-4955f950f20d\") " pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.492186 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.492896 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-config-data\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.493476 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.493871 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-config-data-custom\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.515144 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbplq\" (UniqueName: \"kubernetes.io/projected/f740b81a-80b8-462b-a303-3804b01ac8fc-kube-api-access-xbplq\") pod \"manila-api-0\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.663890 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.678171 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.690172 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 24 14:30:17 crc kubenswrapper[5039]: I1124 14:30:17.701238 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-share-share1-0" Nov 24 14:30:18 crc kubenswrapper[5039]: I1124 14:30:18.696658 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74cfff99f-ptjrg"] Nov 24 14:30:18 crc kubenswrapper[5039]: W1124 14:30:18.700751 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf0ced711_f251_4bc4_b59c_4955f950f20d.slice/crio-dc8b799a63c3ae9272fbcad02ceeeec5b074ad7a993e67e2dd40e2fe9707e41b WatchSource:0}: Error finding container dc8b799a63c3ae9272fbcad02ceeeec5b074ad7a993e67e2dd40e2fe9707e41b: Status 404 returned error can't find the container with id dc8b799a63c3ae9272fbcad02ceeeec5b074ad7a993e67e2dd40e2fe9707e41b Nov 24 14:30:18 crc kubenswrapper[5039]: I1124 14:30:18.700950 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 14:30:18 crc kubenswrapper[5039]: I1124 14:30:18.708100 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 24 14:30:18 crc kubenswrapper[5039]: I1124 14:30:18.785343 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 24 14:30:18 crc kubenswrapper[5039]: W1124 14:30:18.796575 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0e10bdf5_f36b_4b1d_b0ae_2304b07be38e.slice/crio-e02ad467d3a73621882a69a2f9043c6b8ba9275c53c100438deb5f39c5a047fb WatchSource:0}: Error finding container e02ad467d3a73621882a69a2f9043c6b8ba9275c53c100438deb5f39c5a047fb: Status 404 returned error can't find the container with id e02ad467d3a73621882a69a2f9043c6b8ba9275c53c100438deb5f39c5a047fb Nov 24 14:30:19 crc kubenswrapper[5039]: I1124 14:30:19.463178 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" event={"ID":"f0ced711-f251-4bc4-b59c-4955f950f20d","Type":"ContainerStarted","Data":"78c3bbe5d51d0cc8cf8958ccd35201c1db68b009b892ae66b982f4665c1635b5"} Nov 24 14:30:19 crc kubenswrapper[5039]: I1124 14:30:19.463624 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" event={"ID":"f0ced711-f251-4bc4-b59c-4955f950f20d","Type":"ContainerStarted","Data":"dc8b799a63c3ae9272fbcad02ceeeec5b074ad7a993e67e2dd40e2fe9707e41b"} Nov 24 14:30:19 crc kubenswrapper[5039]: I1124 14:30:19.469837 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"49c0901c-2c86-40c0-8f18-56b111088afa","Type":"ContainerStarted","Data":"2d04ad9cb5bfc6e70cab87d6f0120b59124dcb2bcd62267631688aa2c21f1729"} Nov 24 14:30:19 crc kubenswrapper[5039]: I1124 14:30:19.475985 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e","Type":"ContainerStarted","Data":"e02ad467d3a73621882a69a2f9043c6b8ba9275c53c100438deb5f39c5a047fb"} Nov 24 14:30:19 crc kubenswrapper[5039]: I1124 14:30:19.634836 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 24 14:30:19 crc kubenswrapper[5039]: W1124 14:30:19.966546 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf740b81a_80b8_462b_a303_3804b01ac8fc.slice/crio-cc0e51f7c6cf5a5cf3e6e335cca6195601a042f5dbe950e0e6303721fbebd0b4 WatchSource:0}: Error finding container 
cc0e51f7c6cf5a5cf3e6e335cca6195601a042f5dbe950e0e6303721fbebd0b4: Status 404 returned error can't find the container with id cc0e51f7c6cf5a5cf3e6e335cca6195601a042f5dbe950e0e6303721fbebd0b4 Nov 24 14:30:20 crc kubenswrapper[5039]: I1124 14:30:20.143122 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-api-0"] Nov 24 14:30:20 crc kubenswrapper[5039]: I1124 14:30:20.500317 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"f740b81a-80b8-462b-a303-3804b01ac8fc","Type":"ContainerStarted","Data":"cc0e51f7c6cf5a5cf3e6e335cca6195601a042f5dbe950e0e6303721fbebd0b4"} Nov 24 14:30:20 crc kubenswrapper[5039]: I1124 14:30:20.502637 5039 generic.go:334] "Generic (PLEG): container finished" podID="f0ced711-f251-4bc4-b59c-4955f950f20d" containerID="78c3bbe5d51d0cc8cf8958ccd35201c1db68b009b892ae66b982f4665c1635b5" exitCode=0 Nov 24 14:30:20 crc kubenswrapper[5039]: I1124 14:30:20.502690 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" event={"ID":"f0ced711-f251-4bc4-b59c-4955f950f20d","Type":"ContainerDied","Data":"78c3bbe5d51d0cc8cf8958ccd35201c1db68b009b892ae66b982f4665c1635b5"} Nov 24 14:30:21 crc kubenswrapper[5039]: I1124 14:30:21.547698 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"f740b81a-80b8-462b-a303-3804b01ac8fc","Type":"ContainerStarted","Data":"54890fc2c564027de5b5339e047580d80fe13a0c7c1e0b5c8b26ad4e0591cba6"} Nov 24 14:30:21 crc kubenswrapper[5039]: I1124 14:30:21.556664 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" event={"ID":"f0ced711-f251-4bc4-b59c-4955f950f20d","Type":"ContainerStarted","Data":"0a270113facc769f87268a73c010a4c0041e8c5a092fa00f1c85b5ec7e39600d"} Nov 24 14:30:21 crc kubenswrapper[5039]: I1124 14:30:21.556745 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:21 crc kubenswrapper[5039]: I1124 14:30:21.560168 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"49c0901c-2c86-40c0-8f18-56b111088afa","Type":"ContainerStarted","Data":"e3eadfa09be4e7f9da5b573003282b192cbee945628285e368cf9660be58a41f"} Nov 24 14:30:21 crc kubenswrapper[5039]: I1124 14:30:21.582841 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" podStartSLOduration=5.582817925 podStartE2EDuration="5.582817925s" podCreationTimestamp="2025-11-24 14:30:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 14:30:21.575134607 +0000 UTC m=+4334.014259117" watchObservedRunningTime="2025-11-24 14:30:21.582817925 +0000 UTC m=+4334.021942425" Nov 24 14:30:22 crc kubenswrapper[5039]: I1124 14:30:22.491258 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 14:30:22 crc kubenswrapper[5039]: I1124 14:30:22.491909 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" containerName="ceilometer-notification-agent" containerID="cri-o://676ad49a9cf447ed89ee4328121b62dea57f7d5bac27fc3c01e258a256979041" gracePeriod=30 Nov 24 14:30:22 crc kubenswrapper[5039]: I1124 14:30:22.491926 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" 
podUID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" containerName="ceilometer-central-agent" containerID="cri-o://d7d5b28c0a12345292200d4ec7b441408e55ebf9f0f12ab3fe09723cee8869da" gracePeriod=30 Nov 24 14:30:22 crc kubenswrapper[5039]: I1124 14:30:22.491986 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" containerName="proxy-httpd" containerID="cri-o://ef9919777fea4d0846a31936b070b22baec3d41ef9ecdbf05f9bf5dec75dfb41" gracePeriod=30 Nov 24 14:30:22 crc kubenswrapper[5039]: I1124 14:30:22.492014 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" containerName="sg-core" containerID="cri-o://d7ed1854953f5beccc1c80d3aa08f9cc44a999bd3d53f012a7882ae1b779de73" gracePeriod=30 Nov 24 14:30:22 crc kubenswrapper[5039]: I1124 14:30:22.574704 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"f740b81a-80b8-462b-a303-3804b01ac8fc","Type":"ContainerStarted","Data":"16b16db151a684a67b3c5796f235087ea3bcde6f8b70bafca93b3c680db72069"} Nov 24 14:30:22 crc kubenswrapper[5039]: I1124 14:30:22.574882 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="f740b81a-80b8-462b-a303-3804b01ac8fc" containerName="manila-api-log" containerID="cri-o://54890fc2c564027de5b5339e047580d80fe13a0c7c1e0b5c8b26ad4e0591cba6" gracePeriod=30 Nov 24 14:30:22 crc kubenswrapper[5039]: I1124 14:30:22.575023 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 24 14:30:22 crc kubenswrapper[5039]: I1124 14:30:22.575636 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="f740b81a-80b8-462b-a303-3804b01ac8fc" containerName="manila-api" containerID="cri-o://16b16db151a684a67b3c5796f235087ea3bcde6f8b70bafca93b3c680db72069" gracePeriod=30 Nov 24 14:30:22 crc kubenswrapper[5039]: I1124 14:30:22.578568 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"49c0901c-2c86-40c0-8f18-56b111088afa","Type":"ContainerStarted","Data":"feb3ca8cb3db94a776c82aa07045c90a8c1887257d63ad579c762491e3778a36"} Nov 24 14:30:22 crc kubenswrapper[5039]: I1124 14:30:22.602126 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=5.602102032 podStartE2EDuration="5.602102032s" podCreationTimestamp="2025-11-24 14:30:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 14:30:22.594352703 +0000 UTC m=+4335.033477193" watchObservedRunningTime="2025-11-24 14:30:22.602102032 +0000 UTC m=+4335.041226532" Nov 24 14:30:22 crc kubenswrapper[5039]: I1124 14:30:22.636987 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=5.130669581 podStartE2EDuration="6.636963804s" podCreationTimestamp="2025-11-24 14:30:16 +0000 UTC" firstStartedPulling="2025-11-24 14:30:18.700747371 +0000 UTC m=+4331.139871871" lastFinishedPulling="2025-11-24 14:30:20.207041594 +0000 UTC m=+4332.646166094" observedRunningTime="2025-11-24 14:30:22.623753042 +0000 UTC m=+4335.062877542" watchObservedRunningTime="2025-11-24 14:30:22.636963804 +0000 UTC m=+4335.076088304" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 
14:30:23.313538 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:30:23 crc kubenswrapper[5039]: E1124 14:30:23.313968 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.385043 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.489025 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xbplq\" (UniqueName: \"kubernetes.io/projected/f740b81a-80b8-462b-a303-3804b01ac8fc-kube-api-access-xbplq\") pod \"f740b81a-80b8-462b-a303-3804b01ac8fc\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.489145 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-scripts\") pod \"f740b81a-80b8-462b-a303-3804b01ac8fc\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.489241 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f740b81a-80b8-462b-a303-3804b01ac8fc-etc-machine-id\") pod \"f740b81a-80b8-462b-a303-3804b01ac8fc\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.489268 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-combined-ca-bundle\") pod \"f740b81a-80b8-462b-a303-3804b01ac8fc\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.489294 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-config-data-custom\") pod \"f740b81a-80b8-462b-a303-3804b01ac8fc\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.489408 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-config-data\") pod \"f740b81a-80b8-462b-a303-3804b01ac8fc\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.489531 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f740b81a-80b8-462b-a303-3804b01ac8fc-logs\") pod \"f740b81a-80b8-462b-a303-3804b01ac8fc\" (UID: \"f740b81a-80b8-462b-a303-3804b01ac8fc\") " Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.490731 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f740b81a-80b8-462b-a303-3804b01ac8fc-logs" (OuterVolumeSpecName: "logs") pod "f740b81a-80b8-462b-a303-3804b01ac8fc" (UID: 
"f740b81a-80b8-462b-a303-3804b01ac8fc"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.490779 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f740b81a-80b8-462b-a303-3804b01ac8fc-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "f740b81a-80b8-462b-a303-3804b01ac8fc" (UID: "f740b81a-80b8-462b-a303-3804b01ac8fc"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.496947 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f740b81a-80b8-462b-a303-3804b01ac8fc" (UID: "f740b81a-80b8-462b-a303-3804b01ac8fc"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.497886 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-scripts" (OuterVolumeSpecName: "scripts") pod "f740b81a-80b8-462b-a303-3804b01ac8fc" (UID: "f740b81a-80b8-462b-a303-3804b01ac8fc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.499974 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f740b81a-80b8-462b-a303-3804b01ac8fc-kube-api-access-xbplq" (OuterVolumeSpecName: "kube-api-access-xbplq") pod "f740b81a-80b8-462b-a303-3804b01ac8fc" (UID: "f740b81a-80b8-462b-a303-3804b01ac8fc"). InnerVolumeSpecName "kube-api-access-xbplq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.529896 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f740b81a-80b8-462b-a303-3804b01ac8fc" (UID: "f740b81a-80b8-462b-a303-3804b01ac8fc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.563173 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-config-data" (OuterVolumeSpecName: "config-data") pod "f740b81a-80b8-462b-a303-3804b01ac8fc" (UID: "f740b81a-80b8-462b-a303-3804b01ac8fc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.592140 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.592581 5039 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f740b81a-80b8-462b-a303-3804b01ac8fc-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.592596 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.592609 5039 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.592621 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f740b81a-80b8-462b-a303-3804b01ac8fc-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.592634 5039 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f740b81a-80b8-462b-a303-3804b01ac8fc-logs\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.592646 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xbplq\" (UniqueName: \"kubernetes.io/projected/f740b81a-80b8-462b-a303-3804b01ac8fc-kube-api-access-xbplq\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.602931 5039 generic.go:334] "Generic (PLEG): container finished" podID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" containerID="d7d5b28c0a12345292200d4ec7b441408e55ebf9f0f12ab3fe09723cee8869da" exitCode=0 Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.602983 5039 generic.go:334] "Generic (PLEG): container finished" podID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" containerID="ef9919777fea4d0846a31936b070b22baec3d41ef9ecdbf05f9bf5dec75dfb41" exitCode=0 Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.602993 5039 generic.go:334] "Generic (PLEG): container finished" podID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" containerID="d7ed1854953f5beccc1c80d3aa08f9cc44a999bd3d53f012a7882ae1b779de73" exitCode=2 Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.603061 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"831e28f3-74a0-4b52-933c-1a3e7a7811f6","Type":"ContainerDied","Data":"d7d5b28c0a12345292200d4ec7b441408e55ebf9f0f12ab3fe09723cee8869da"} Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.603104 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"831e28f3-74a0-4b52-933c-1a3e7a7811f6","Type":"ContainerDied","Data":"ef9919777fea4d0846a31936b070b22baec3d41ef9ecdbf05f9bf5dec75dfb41"} Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.603116 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"831e28f3-74a0-4b52-933c-1a3e7a7811f6","Type":"ContainerDied","Data":"d7ed1854953f5beccc1c80d3aa08f9cc44a999bd3d53f012a7882ae1b779de73"} Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.607053 5039 generic.go:334] "Generic (PLEG): container finished" podID="f740b81a-80b8-462b-a303-3804b01ac8fc" containerID="16b16db151a684a67b3c5796f235087ea3bcde6f8b70bafca93b3c680db72069" exitCode=0 Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.607107 5039 generic.go:334] "Generic (PLEG): container finished" podID="f740b81a-80b8-462b-a303-3804b01ac8fc" containerID="54890fc2c564027de5b5339e047580d80fe13a0c7c1e0b5c8b26ad4e0591cba6" exitCode=143 Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.607553 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.609251 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"f740b81a-80b8-462b-a303-3804b01ac8fc","Type":"ContainerDied","Data":"16b16db151a684a67b3c5796f235087ea3bcde6f8b70bafca93b3c680db72069"} Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.609313 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"f740b81a-80b8-462b-a303-3804b01ac8fc","Type":"ContainerDied","Data":"54890fc2c564027de5b5339e047580d80fe13a0c7c1e0b5c8b26ad4e0591cba6"} Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.609336 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"f740b81a-80b8-462b-a303-3804b01ac8fc","Type":"ContainerDied","Data":"cc0e51f7c6cf5a5cf3e6e335cca6195601a042f5dbe950e0e6303721fbebd0b4"} Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.609361 5039 scope.go:117] "RemoveContainer" containerID="16b16db151a684a67b3c5796f235087ea3bcde6f8b70bafca93b3c680db72069" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.698058 5039 scope.go:117] "RemoveContainer" containerID="54890fc2c564027de5b5339e047580d80fe13a0c7c1e0b5c8b26ad4e0591cba6" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.706155 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-api-0"] Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.723039 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-api-0"] Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.747205 5039 scope.go:117] "RemoveContainer" containerID="16b16db151a684a67b3c5796f235087ea3bcde6f8b70bafca93b3c680db72069" Nov 24 14:30:23 crc kubenswrapper[5039]: E1124 14:30:23.751942 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16b16db151a684a67b3c5796f235087ea3bcde6f8b70bafca93b3c680db72069\": container with ID starting with 16b16db151a684a67b3c5796f235087ea3bcde6f8b70bafca93b3c680db72069 not found: ID does not exist" containerID="16b16db151a684a67b3c5796f235087ea3bcde6f8b70bafca93b3c680db72069" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.752023 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16b16db151a684a67b3c5796f235087ea3bcde6f8b70bafca93b3c680db72069"} err="failed to get container status \"16b16db151a684a67b3c5796f235087ea3bcde6f8b70bafca93b3c680db72069\": rpc error: code = NotFound desc = could not find container \"16b16db151a684a67b3c5796f235087ea3bcde6f8b70bafca93b3c680db72069\": container with ID starting with 
16b16db151a684a67b3c5796f235087ea3bcde6f8b70bafca93b3c680db72069 not found: ID does not exist" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.752051 5039 scope.go:117] "RemoveContainer" containerID="54890fc2c564027de5b5339e047580d80fe13a0c7c1e0b5c8b26ad4e0591cba6" Nov 24 14:30:23 crc kubenswrapper[5039]: E1124 14:30:23.752527 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54890fc2c564027de5b5339e047580d80fe13a0c7c1e0b5c8b26ad4e0591cba6\": container with ID starting with 54890fc2c564027de5b5339e047580d80fe13a0c7c1e0b5c8b26ad4e0591cba6 not found: ID does not exist" containerID="54890fc2c564027de5b5339e047580d80fe13a0c7c1e0b5c8b26ad4e0591cba6" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.752549 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54890fc2c564027de5b5339e047580d80fe13a0c7c1e0b5c8b26ad4e0591cba6"} err="failed to get container status \"54890fc2c564027de5b5339e047580d80fe13a0c7c1e0b5c8b26ad4e0591cba6\": rpc error: code = NotFound desc = could not find container \"54890fc2c564027de5b5339e047580d80fe13a0c7c1e0b5c8b26ad4e0591cba6\": container with ID starting with 54890fc2c564027de5b5339e047580d80fe13a0c7c1e0b5c8b26ad4e0591cba6 not found: ID does not exist" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.752563 5039 scope.go:117] "RemoveContainer" containerID="16b16db151a684a67b3c5796f235087ea3bcde6f8b70bafca93b3c680db72069" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.753001 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16b16db151a684a67b3c5796f235087ea3bcde6f8b70bafca93b3c680db72069"} err="failed to get container status \"16b16db151a684a67b3c5796f235087ea3bcde6f8b70bafca93b3c680db72069\": rpc error: code = NotFound desc = could not find container \"16b16db151a684a67b3c5796f235087ea3bcde6f8b70bafca93b3c680db72069\": container with ID starting with 16b16db151a684a67b3c5796f235087ea3bcde6f8b70bafca93b3c680db72069 not found: ID does not exist" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.753021 5039 scope.go:117] "RemoveContainer" containerID="54890fc2c564027de5b5339e047580d80fe13a0c7c1e0b5c8b26ad4e0591cba6" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.753326 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54890fc2c564027de5b5339e047580d80fe13a0c7c1e0b5c8b26ad4e0591cba6"} err="failed to get container status \"54890fc2c564027de5b5339e047580d80fe13a0c7c1e0b5c8b26ad4e0591cba6\": rpc error: code = NotFound desc = could not find container \"54890fc2c564027de5b5339e047580d80fe13a0c7c1e0b5c8b26ad4e0591cba6\": container with ID starting with 54890fc2c564027de5b5339e047580d80fe13a0c7c1e0b5c8b26ad4e0591cba6 not found: ID does not exist" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.762949 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"] Nov 24 14:30:23 crc kubenswrapper[5039]: E1124 14:30:23.763441 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f740b81a-80b8-462b-a303-3804b01ac8fc" containerName="manila-api-log" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.763460 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="f740b81a-80b8-462b-a303-3804b01ac8fc" containerName="manila-api-log" Nov 24 14:30:23 crc kubenswrapper[5039]: E1124 14:30:23.763494 5039 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="f740b81a-80b8-462b-a303-3804b01ac8fc" containerName="manila-api" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.763520 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="f740b81a-80b8-462b-a303-3804b01ac8fc" containerName="manila-api" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.765781 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="f740b81a-80b8-462b-a303-3804b01ac8fc" containerName="manila-api-log" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.765862 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="f740b81a-80b8-462b-a303-3804b01ac8fc" containerName="manila-api" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.771160 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.774235 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.774524 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-manila-internal-svc" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.774672 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-manila-public-svc" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.776420 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.901329 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/92bc16f0-cdd7-4437-aa94-57bf0cd83126-scripts\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.901429 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/92bc16f0-cdd7-4437-aa94-57bf0cd83126-internal-tls-certs\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.901460 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/92bc16f0-cdd7-4437-aa94-57bf0cd83126-public-tls-certs\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.901496 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92bc16f0-cdd7-4437-aa94-57bf0cd83126-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.901582 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/92bc16f0-cdd7-4437-aa94-57bf0cd83126-config-data-custom\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.901630 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/92bc16f0-cdd7-4437-aa94-57bf0cd83126-logs\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.901653 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/92bc16f0-cdd7-4437-aa94-57bf0cd83126-etc-machine-id\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.901721 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56hfs\" (UniqueName: \"kubernetes.io/projected/92bc16f0-cdd7-4437-aa94-57bf0cd83126-kube-api-access-56hfs\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:23 crc kubenswrapper[5039]: I1124 14:30:23.901751 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/92bc16f0-cdd7-4437-aa94-57bf0cd83126-config-data\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.003699 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/92bc16f0-cdd7-4437-aa94-57bf0cd83126-scripts\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.003818 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/92bc16f0-cdd7-4437-aa94-57bf0cd83126-internal-tls-certs\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.003855 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/92bc16f0-cdd7-4437-aa94-57bf0cd83126-public-tls-certs\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.003919 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92bc16f0-cdd7-4437-aa94-57bf0cd83126-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.003947 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/92bc16f0-cdd7-4437-aa94-57bf0cd83126-config-data-custom\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.003981 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/92bc16f0-cdd7-4437-aa94-57bf0cd83126-logs\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.004038 5039 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/92bc16f0-cdd7-4437-aa94-57bf0cd83126-etc-machine-id\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.004120 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56hfs\" (UniqueName: \"kubernetes.io/projected/92bc16f0-cdd7-4437-aa94-57bf0cd83126-kube-api-access-56hfs\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.004162 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/92bc16f0-cdd7-4437-aa94-57bf0cd83126-config-data\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.004421 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/92bc16f0-cdd7-4437-aa94-57bf0cd83126-etc-machine-id\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.004849 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/92bc16f0-cdd7-4437-aa94-57bf0cd83126-logs\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.008995 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/92bc16f0-cdd7-4437-aa94-57bf0cd83126-scripts\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.009445 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/92bc16f0-cdd7-4437-aa94-57bf0cd83126-config-data\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.009468 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/92bc16f0-cdd7-4437-aa94-57bf0cd83126-config-data-custom\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.010234 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/92bc16f0-cdd7-4437-aa94-57bf0cd83126-public-tls-certs\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.010420 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/92bc16f0-cdd7-4437-aa94-57bf0cd83126-internal-tls-certs\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.023165 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/92bc16f0-cdd7-4437-aa94-57bf0cd83126-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.025130 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56hfs\" (UniqueName: \"kubernetes.io/projected/92bc16f0-cdd7-4437-aa94-57bf0cd83126-kube-api-access-56hfs\") pod \"manila-api-0\" (UID: \"92bc16f0-cdd7-4437-aa94-57bf0cd83126\") " pod="openstack/manila-api-0" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.088406 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.343962 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f740b81a-80b8-462b-a303-3804b01ac8fc" path="/var/lib/kubelet/pods/f740b81a-80b8-462b-a303-3804b01ac8fc/volumes" Nov 24 14:30:24 crc kubenswrapper[5039]: I1124 14:30:24.868092 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 24 14:30:25 crc kubenswrapper[5039]: I1124 14:30:25.657067 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"92bc16f0-cdd7-4437-aa94-57bf0cd83126","Type":"ContainerStarted","Data":"365a57135037937f642bd21b5f0439be0ec680eeb98f7a842b0dbe16c78792d3"} Nov 24 14:30:25 crc kubenswrapper[5039]: I1124 14:30:25.661683 5039 generic.go:334] "Generic (PLEG): container finished" podID="3943960b-1e53-413a-9dd9-505fe98db72d" containerID="b29cd16920bf897136ddba32f54dc338246d34d667368df3aa562dc9041032ae" exitCode=137 Nov 24 14:30:25 crc kubenswrapper[5039]: I1124 14:30:25.661723 5039 generic.go:334] "Generic (PLEG): container finished" podID="3943960b-1e53-413a-9dd9-505fe98db72d" containerID="c24e8960d0a07ce16a6321bb797ed71d3f88109b75919a561c453705b98ac0af" exitCode=137 Nov 24 14:30:25 crc kubenswrapper[5039]: I1124 14:30:25.661783 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-fd8bc7b7f-2mcnl" event={"ID":"3943960b-1e53-413a-9dd9-505fe98db72d","Type":"ContainerDied","Data":"b29cd16920bf897136ddba32f54dc338246d34d667368df3aa562dc9041032ae"} Nov 24 14:30:25 crc kubenswrapper[5039]: I1124 14:30:25.661813 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-fd8bc7b7f-2mcnl" event={"ID":"3943960b-1e53-413a-9dd9-505fe98db72d","Type":"ContainerDied","Data":"c24e8960d0a07ce16a6321bb797ed71d3f88109b75919a561c453705b98ac0af"} Nov 24 14:30:25 crc kubenswrapper[5039]: I1124 14:30:25.663465 5039 generic.go:334] "Generic (PLEG): container finished" podID="9f956cc7-2ab6-4818-8d36-09cbd169b9b2" containerID="6166b96488451436dcc3cdb61d575d0c3dff77f100ae949a59254bf6a559ba52" exitCode=137 Nov 24 14:30:25 crc kubenswrapper[5039]: I1124 14:30:25.663483 5039 generic.go:334] "Generic (PLEG): container finished" podID="9f956cc7-2ab6-4818-8d36-09cbd169b9b2" containerID="9894b19bd17da2eda60b53363718f20a91bc4721b85a456121e60cfe5a83ae30" exitCode=137 Nov 24 14:30:25 crc kubenswrapper[5039]: I1124 14:30:25.663511 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-76c7f554dc-ctpbw" event={"ID":"9f956cc7-2ab6-4818-8d36-09cbd169b9b2","Type":"ContainerDied","Data":"6166b96488451436dcc3cdb61d575d0c3dff77f100ae949a59254bf6a559ba52"} Nov 24 14:30:25 crc kubenswrapper[5039]: I1124 14:30:25.663533 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-76c7f554dc-ctpbw" 
event={"ID":"9f956cc7-2ab6-4818-8d36-09cbd169b9b2","Type":"ContainerDied","Data":"9894b19bd17da2eda60b53363718f20a91bc4721b85a456121e60cfe5a83ae30"} Nov 24 14:30:26 crc kubenswrapper[5039]: I1124 14:30:26.676332 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"92bc16f0-cdd7-4437-aa94-57bf0cd83126","Type":"ContainerStarted","Data":"023be60f2a337f2418142c3d342eb58574b845c206a99c49487ed14da6822266"} Nov 24 14:30:26 crc kubenswrapper[5039]: I1124 14:30:26.958348 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:30:26 crc kubenswrapper[5039]: I1124 14:30:26.965068 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:30:27 crc kubenswrapper[5039]: I1124 14:30:27.664487 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0" Nov 24 14:30:27 crc kubenswrapper[5039]: I1124 14:30:27.681077 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-74cfff99f-ptjrg" Nov 24 14:30:27 crc kubenswrapper[5039]: I1124 14:30:27.694228 5039 generic.go:334] "Generic (PLEG): container finished" podID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" containerID="676ad49a9cf447ed89ee4328121b62dea57f7d5bac27fc3c01e258a256979041" exitCode=0 Nov 24 14:30:27 crc kubenswrapper[5039]: I1124 14:30:27.694279 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"831e28f3-74a0-4b52-933c-1a3e7a7811f6","Type":"ContainerDied","Data":"676ad49a9cf447ed89ee4328121b62dea57f7d5bac27fc3c01e258a256979041"} Nov 24 14:30:27 crc kubenswrapper[5039]: I1124 14:30:27.750284 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-768b698657-svwhq"] Nov 24 14:30:27 crc kubenswrapper[5039]: I1124 14:30:27.750699 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-768b698657-svwhq" podUID="34d32473-00c1-407b-b009-0d43c17038f9" containerName="dnsmasq-dns" containerID="cri-o://dba3100ce911db3502d42c230d3b265f5953b08786d65209227b2e44659334ad" gracePeriod=10 Nov 24 14:30:28 crc kubenswrapper[5039]: I1124 14:30:28.513601 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-768b698657-svwhq" podUID="34d32473-00c1-407b-b009-0d43c17038f9" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.1.0:5353: connect: connection refused" Nov 24 14:30:28 crc kubenswrapper[5039]: I1124 14:30:28.710111 5039 generic.go:334] "Generic (PLEG): container finished" podID="34d32473-00c1-407b-b009-0d43c17038f9" containerID="dba3100ce911db3502d42c230d3b265f5953b08786d65209227b2e44659334ad" exitCode=0 Nov 24 14:30:28 crc kubenswrapper[5039]: I1124 14:30:28.710169 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-768b698657-svwhq" event={"ID":"34d32473-00c1-407b-b009-0d43c17038f9","Type":"ContainerDied","Data":"dba3100ce911db3502d42c230d3b265f5953b08786d65209227b2e44659334ad"} Nov 24 14:30:29 crc kubenswrapper[5039]: I1124 14:30:29.106095 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-6fbc854bcb-ssv8l" Nov 24 14:30:29 crc kubenswrapper[5039]: I1124 14:30:29.191213 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6c5b658bc4-625q2"] Nov 24 14:30:29 crc kubenswrapper[5039]: I1124 14:30:29.191497 5039 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6c5b658bc4-625q2" podUID="ee231063-13a0-4a14-9864-362a8459b8e7" containerName="horizon-log" containerID="cri-o://d49ed4e675c018d6675328a5efa7109f8ba13218f9a5be6b7962ff27a851bccd" gracePeriod=30 Nov 24 14:30:29 crc kubenswrapper[5039]: I1124 14:30:29.191659 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6c5b658bc4-625q2" podUID="ee231063-13a0-4a14-9864-362a8459b8e7" containerName="horizon" containerID="cri-o://c83ec4fbe52dbd8b6865ecf997689e3c1746c09451f88b0f608e7cd6c1b6502d" gracePeriod=30 Nov 24 14:30:29 crc kubenswrapper[5039]: I1124 14:30:29.197589 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6c5b658bc4-625q2" podUID="ee231063-13a0-4a14-9864-362a8459b8e7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.69:8443/dashboard/auth/login/?next=/dashboard/\": EOF" Nov 24 14:30:29 crc kubenswrapper[5039]: I1124 14:30:29.731086 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-76c7f554dc-ctpbw" event={"ID":"9f956cc7-2ab6-4818-8d36-09cbd169b9b2","Type":"ContainerDied","Data":"414a38a61c47e811cbbbd709ef9a2454c2fb74e407be69ad74dae3f865fc0188"} Nov 24 14:30:29 crc kubenswrapper[5039]: I1124 14:30:29.732130 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="414a38a61c47e811cbbbd709ef9a2454c2fb74e407be69ad74dae3f865fc0188" Nov 24 14:30:29 crc kubenswrapper[5039]: I1124 14:30:29.987763 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-76c7f554dc-ctpbw" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.007166 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-fd8bc7b7f-2mcnl" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.049568 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3943960b-1e53-413a-9dd9-505fe98db72d-horizon-secret-key\") pod \"3943960b-1e53-413a-9dd9-505fe98db72d\" (UID: \"3943960b-1e53-413a-9dd9-505fe98db72d\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.049614 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3943960b-1e53-413a-9dd9-505fe98db72d-logs\") pod \"3943960b-1e53-413a-9dd9-505fe98db72d\" (UID: \"3943960b-1e53-413a-9dd9-505fe98db72d\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.049642 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gb4gj\" (UniqueName: \"kubernetes.io/projected/3943960b-1e53-413a-9dd9-505fe98db72d-kube-api-access-gb4gj\") pod \"3943960b-1e53-413a-9dd9-505fe98db72d\" (UID: \"3943960b-1e53-413a-9dd9-505fe98db72d\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.049665 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-config-data\") pod \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\" (UID: \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.049715 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3943960b-1e53-413a-9dd9-505fe98db72d-scripts\") pod \"3943960b-1e53-413a-9dd9-505fe98db72d\" (UID: \"3943960b-1e53-413a-9dd9-505fe98db72d\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.049774 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-horizon-secret-key\") pod \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\" (UID: \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.049808 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xfkdf\" (UniqueName: \"kubernetes.io/projected/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-kube-api-access-xfkdf\") pod \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\" (UID: \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.049856 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3943960b-1e53-413a-9dd9-505fe98db72d-config-data\") pod \"3943960b-1e53-413a-9dd9-505fe98db72d\" (UID: \"3943960b-1e53-413a-9dd9-505fe98db72d\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.049874 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-scripts\") pod \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\" (UID: \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.049981 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-logs\") pod \"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\" (UID: 
\"9f956cc7-2ab6-4818-8d36-09cbd169b9b2\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.051014 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-logs" (OuterVolumeSpecName: "logs") pod "9f956cc7-2ab6-4818-8d36-09cbd169b9b2" (UID: "9f956cc7-2ab6-4818-8d36-09cbd169b9b2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.056680 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3943960b-1e53-413a-9dd9-505fe98db72d-logs" (OuterVolumeSpecName: "logs") pod "3943960b-1e53-413a-9dd9-505fe98db72d" (UID: "3943960b-1e53-413a-9dd9-505fe98db72d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.056911 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3943960b-1e53-413a-9dd9-505fe98db72d-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "3943960b-1e53-413a-9dd9-505fe98db72d" (UID: "3943960b-1e53-413a-9dd9-505fe98db72d"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.063504 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3943960b-1e53-413a-9dd9-505fe98db72d-kube-api-access-gb4gj" (OuterVolumeSpecName: "kube-api-access-gb4gj") pod "3943960b-1e53-413a-9dd9-505fe98db72d" (UID: "3943960b-1e53-413a-9dd9-505fe98db72d"). InnerVolumeSpecName "kube-api-access-gb4gj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.064258 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-kube-api-access-xfkdf" (OuterVolumeSpecName: "kube-api-access-xfkdf") pod "9f956cc7-2ab6-4818-8d36-09cbd169b9b2" (UID: "9f956cc7-2ab6-4818-8d36-09cbd169b9b2"). InnerVolumeSpecName "kube-api-access-xfkdf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.064535 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "9f956cc7-2ab6-4818-8d36-09cbd169b9b2" (UID: "9f956cc7-2ab6-4818-8d36-09cbd169b9b2"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.095315 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3943960b-1e53-413a-9dd9-505fe98db72d-config-data" (OuterVolumeSpecName: "config-data") pod "3943960b-1e53-413a-9dd9-505fe98db72d" (UID: "3943960b-1e53-413a-9dd9-505fe98db72d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.153053 5039 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.153412 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xfkdf\" (UniqueName: \"kubernetes.io/projected/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-kube-api-access-xfkdf\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.153429 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3943960b-1e53-413a-9dd9-505fe98db72d-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.153440 5039 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-logs\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.153452 5039 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3943960b-1e53-413a-9dd9-505fe98db72d-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.153461 5039 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3943960b-1e53-413a-9dd9-505fe98db72d-logs\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.153471 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gb4gj\" (UniqueName: \"kubernetes.io/projected/3943960b-1e53-413a-9dd9-505fe98db72d-kube-api-access-gb4gj\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.186234 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3943960b-1e53-413a-9dd9-505fe98db72d-scripts" (OuterVolumeSpecName: "scripts") pod "3943960b-1e53-413a-9dd9-505fe98db72d" (UID: "3943960b-1e53-413a-9dd9-505fe98db72d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.186450 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-config-data" (OuterVolumeSpecName: "config-data") pod "9f956cc7-2ab6-4818-8d36-09cbd169b9b2" (UID: "9f956cc7-2ab6-4818-8d36-09cbd169b9b2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.186666 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-scripts" (OuterVolumeSpecName: "scripts") pod "9f956cc7-2ab6-4818-8d36-09cbd169b9b2" (UID: "9f956cc7-2ab6-4818-8d36-09cbd169b9b2"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.255846 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.255883 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3943960b-1e53-413a-9dd9-505fe98db72d-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.255892 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9f956cc7-2ab6-4818-8d36-09cbd169b9b2-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.361951 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.484134 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.561675 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-combined-ca-bundle\") pod \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.561730 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-sg-core-conf-yaml\") pod \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.561768 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-ceilometer-tls-certs\") pod \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.561822 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/831e28f3-74a0-4b52-933c-1a3e7a7811f6-run-httpd\") pod \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.561857 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-config-data\") pod \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.561917 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-scripts\") pod \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.562025 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/831e28f3-74a0-4b52-933c-1a3e7a7811f6-log-httpd\") pod 
\"831e28f3-74a0-4b52-933c-1a3e7a7811f6\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.562135 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bhg7n\" (UniqueName: \"kubernetes.io/projected/831e28f3-74a0-4b52-933c-1a3e7a7811f6-kube-api-access-bhg7n\") pod \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\" (UID: \"831e28f3-74a0-4b52-933c-1a3e7a7811f6\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.562617 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/831e28f3-74a0-4b52-933c-1a3e7a7811f6-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "831e28f3-74a0-4b52-933c-1a3e7a7811f6" (UID: "831e28f3-74a0-4b52-933c-1a3e7a7811f6"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.563252 5039 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/831e28f3-74a0-4b52-933c-1a3e7a7811f6-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.563757 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/831e28f3-74a0-4b52-933c-1a3e7a7811f6-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "831e28f3-74a0-4b52-933c-1a3e7a7811f6" (UID: "831e28f3-74a0-4b52-933c-1a3e7a7811f6"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.573116 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-scripts" (OuterVolumeSpecName: "scripts") pod "831e28f3-74a0-4b52-933c-1a3e7a7811f6" (UID: "831e28f3-74a0-4b52-933c-1a3e7a7811f6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.573254 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/831e28f3-74a0-4b52-933c-1a3e7a7811f6-kube-api-access-bhg7n" (OuterVolumeSpecName: "kube-api-access-bhg7n") pod "831e28f3-74a0-4b52-933c-1a3e7a7811f6" (UID: "831e28f3-74a0-4b52-933c-1a3e7a7811f6"). InnerVolumeSpecName "kube-api-access-bhg7n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.651514 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "831e28f3-74a0-4b52-933c-1a3e7a7811f6" (UID: "831e28f3-74a0-4b52-933c-1a3e7a7811f6"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.666199 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-openstack-edpm-ipam\") pod \"34d32473-00c1-407b-b009-0d43c17038f9\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.666266 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-ovsdbserver-sb\") pod \"34d32473-00c1-407b-b009-0d43c17038f9\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.666350 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-dns-svc\") pod \"34d32473-00c1-407b-b009-0d43c17038f9\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.666381 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-config\") pod \"34d32473-00c1-407b-b009-0d43c17038f9\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.666463 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bsw2z\" (UniqueName: \"kubernetes.io/projected/34d32473-00c1-407b-b009-0d43c17038f9-kube-api-access-bsw2z\") pod \"34d32473-00c1-407b-b009-0d43c17038f9\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.666492 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-ovsdbserver-nb\") pod \"34d32473-00c1-407b-b009-0d43c17038f9\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.666552 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-dns-swift-storage-0\") pod \"34d32473-00c1-407b-b009-0d43c17038f9\" (UID: \"34d32473-00c1-407b-b009-0d43c17038f9\") " Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.667047 5039 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.667058 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.667066 5039 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/831e28f3-74a0-4b52-933c-1a3e7a7811f6-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.667075 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bhg7n\" (UniqueName: 
\"kubernetes.io/projected/831e28f3-74a0-4b52-933c-1a3e7a7811f6-kube-api-access-bhg7n\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.675416 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34d32473-00c1-407b-b009-0d43c17038f9-kube-api-access-bsw2z" (OuterVolumeSpecName: "kube-api-access-bsw2z") pod "34d32473-00c1-407b-b009-0d43c17038f9" (UID: "34d32473-00c1-407b-b009-0d43c17038f9"). InnerVolumeSpecName "kube-api-access-bsw2z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.717615 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "831e28f3-74a0-4b52-933c-1a3e7a7811f6" (UID: "831e28f3-74a0-4b52-933c-1a3e7a7811f6"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.744624 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-768b698657-svwhq" event={"ID":"34d32473-00c1-407b-b009-0d43c17038f9","Type":"ContainerDied","Data":"2acd68dfad98b66f8db726637344fa9d41011ffc9f44a393b4023e0e7842374d"} Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.744676 5039 scope.go:117] "RemoveContainer" containerID="dba3100ce911db3502d42c230d3b265f5953b08786d65209227b2e44659334ad" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.745901 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-768b698657-svwhq" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.760001 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"92bc16f0-cdd7-4437-aa94-57bf0cd83126","Type":"ContainerStarted","Data":"edd5caf8bf31debd9d8c50a6ee087dcb44e799f911967ceafc373908c0032ec2"} Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.760701 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.768306 5039 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.768333 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bsw2z\" (UniqueName: \"kubernetes.io/projected/34d32473-00c1-407b-b009-0d43c17038f9-kube-api-access-bsw2z\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.769324 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-fd8bc7b7f-2mcnl" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.769345 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-fd8bc7b7f-2mcnl" event={"ID":"3943960b-1e53-413a-9dd9-505fe98db72d","Type":"ContainerDied","Data":"4ff08e493527c72bac4978a764094847353a57fc1601635885fbfb385a02af8c"} Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.771582 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e","Type":"ContainerStarted","Data":"aa3606c8a46dac0ec9243ac17583744721466d005f0f31c3fbcc832b3e7254d4"} Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.777161 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.777134 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"831e28f3-74a0-4b52-933c-1a3e7a7811f6","Type":"ContainerDied","Data":"e3513345df51b6d01f88fab933a5b16e8579882d069408fa15fc49b9fa157ec5"} Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.778890 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-76c7f554dc-ctpbw" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.802176 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=7.802154118 podStartE2EDuration="7.802154118s" podCreationTimestamp="2025-11-24 14:30:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 14:30:30.783329127 +0000 UTC m=+4343.222453637" watchObservedRunningTime="2025-11-24 14:30:30.802154118 +0000 UTC m=+4343.241278618" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.836798 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "34d32473-00c1-407b-b009-0d43c17038f9" (UID: "34d32473-00c1-407b-b009-0d43c17038f9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.853932 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-config" (OuterVolumeSpecName: "config") pod "34d32473-00c1-407b-b009-0d43c17038f9" (UID: "34d32473-00c1-407b-b009-0d43c17038f9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.867525 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "34d32473-00c1-407b-b009-0d43c17038f9" (UID: "34d32473-00c1-407b-b009-0d43c17038f9"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.873208 5039 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.873230 5039 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-config\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.873253 5039 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.891604 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "34d32473-00c1-407b-b009-0d43c17038f9" (UID: "34d32473-00c1-407b-b009-0d43c17038f9"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.897261 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-config-data" (OuterVolumeSpecName: "config-data") pod "831e28f3-74a0-4b52-933c-1a3e7a7811f6" (UID: "831e28f3-74a0-4b52-933c-1a3e7a7811f6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.898147 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "34d32473-00c1-407b-b009-0d43c17038f9" (UID: "34d32473-00c1-407b-b009-0d43c17038f9"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.913978 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "34d32473-00c1-407b-b009-0d43c17038f9" (UID: "34d32473-00c1-407b-b009-0d43c17038f9"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.956726 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "831e28f3-74a0-4b52-933c-1a3e7a7811f6" (UID: "831e28f3-74a0-4b52-933c-1a3e7a7811f6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.975521 5039 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.975556 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.975566 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.975575 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/831e28f3-74a0-4b52-933c-1a3e7a7811f6-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:30 crc kubenswrapper[5039]: I1124 14:30:30.975583 5039 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/34d32473-00c1-407b-b009-0d43c17038f9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.072192 5039 scope.go:117] "RemoveContainer" containerID="543e077dd91f790cca1736e95c1900b57a91f00a1272f00b039764beaca6589f" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.110444 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-fd8bc7b7f-2mcnl"] Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.122959 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-fd8bc7b7f-2mcnl"] Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.142598 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-76c7f554dc-ctpbw"] Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.162127 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-76c7f554dc-ctpbw"] Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.165680 5039 scope.go:117] "RemoveContainer" containerID="b29cd16920bf897136ddba32f54dc338246d34d667368df3aa562dc9041032ae" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.178487 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-768b698657-svwhq"] Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.208119 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-768b698657-svwhq"] Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.227616 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.241934 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.255254 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 14:30:31 crc kubenswrapper[5039]: E1124 14:30:31.255701 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f956cc7-2ab6-4818-8d36-09cbd169b9b2" containerName="horizon-log" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.255715 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f956cc7-2ab6-4818-8d36-09cbd169b9b2" containerName="horizon-log" 
Nov 24 14:30:31 crc kubenswrapper[5039]: E1124 14:30:31.255730 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34d32473-00c1-407b-b009-0d43c17038f9" containerName="dnsmasq-dns" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.255736 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="34d32473-00c1-407b-b009-0d43c17038f9" containerName="dnsmasq-dns" Nov 24 14:30:31 crc kubenswrapper[5039]: E1124 14:30:31.255746 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" containerName="ceilometer-notification-agent" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.255752 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" containerName="ceilometer-notification-agent" Nov 24 14:30:31 crc kubenswrapper[5039]: E1124 14:30:31.255762 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" containerName="sg-core" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.255767 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" containerName="sg-core" Nov 24 14:30:31 crc kubenswrapper[5039]: E1124 14:30:31.255778 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3943960b-1e53-413a-9dd9-505fe98db72d" containerName="horizon" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.255783 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="3943960b-1e53-413a-9dd9-505fe98db72d" containerName="horizon" Nov 24 14:30:31 crc kubenswrapper[5039]: E1124 14:30:31.255790 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" containerName="proxy-httpd" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.255795 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" containerName="proxy-httpd" Nov 24 14:30:31 crc kubenswrapper[5039]: E1124 14:30:31.255821 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34d32473-00c1-407b-b009-0d43c17038f9" containerName="init" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.255827 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="34d32473-00c1-407b-b009-0d43c17038f9" containerName="init" Nov 24 14:30:31 crc kubenswrapper[5039]: E1124 14:30:31.255842 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" containerName="ceilometer-central-agent" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.255847 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" containerName="ceilometer-central-agent" Nov 24 14:30:31 crc kubenswrapper[5039]: E1124 14:30:31.255858 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3943960b-1e53-413a-9dd9-505fe98db72d" containerName="horizon-log" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.255863 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="3943960b-1e53-413a-9dd9-505fe98db72d" containerName="horizon-log" Nov 24 14:30:31 crc kubenswrapper[5039]: E1124 14:30:31.255921 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f956cc7-2ab6-4818-8d36-09cbd169b9b2" containerName="horizon" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.255929 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f956cc7-2ab6-4818-8d36-09cbd169b9b2" containerName="horizon" Nov 24 14:30:31 crc 
kubenswrapper[5039]: I1124 14:30:31.256126 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="3943960b-1e53-413a-9dd9-505fe98db72d" containerName="horizon" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.256139 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="34d32473-00c1-407b-b009-0d43c17038f9" containerName="dnsmasq-dns" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.256147 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="3943960b-1e53-413a-9dd9-505fe98db72d" containerName="horizon-log" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.256160 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" containerName="proxy-httpd" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.256172 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" containerName="sg-core" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.256181 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f956cc7-2ab6-4818-8d36-09cbd169b9b2" containerName="horizon-log" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.256190 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" containerName="ceilometer-central-agent" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.256203 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" containerName="ceilometer-notification-agent" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.256211 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f956cc7-2ab6-4818-8d36-09cbd169b9b2" containerName="horizon" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.261118 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.267620 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.267894 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.267618 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.273512 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.394248 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.394356 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-scripts\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.394387 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.394434 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.394459 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52c7h\" (UniqueName: \"kubernetes.io/projected/e53f443e-499a-4a20-b97d-0c78bea1c68a-kube-api-access-52c7h\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.394486 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e53f443e-499a-4a20-b97d-0c78bea1c68a-log-httpd\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.394817 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-config-data\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.394894 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/e53f443e-499a-4a20-b97d-0c78bea1c68a-run-httpd\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.401675 5039 scope.go:117] "RemoveContainer" containerID="c24e8960d0a07ce16a6321bb797ed71d3f88109b75919a561c453705b98ac0af" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.431204 5039 scope.go:117] "RemoveContainer" containerID="d7d5b28c0a12345292200d4ec7b441408e55ebf9f0f12ab3fe09723cee8869da" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.459453 5039 scope.go:117] "RemoveContainer" containerID="ef9919777fea4d0846a31936b070b22baec3d41ef9ecdbf05f9bf5dec75dfb41" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.482327 5039 scope.go:117] "RemoveContainer" containerID="d7ed1854953f5beccc1c80d3aa08f9cc44a999bd3d53f012a7882ae1b779de73" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.497533 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.497609 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-scripts\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.497636 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.497682 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.497706 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52c7h\" (UniqueName: \"kubernetes.io/projected/e53f443e-499a-4a20-b97d-0c78bea1c68a-kube-api-access-52c7h\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.497737 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e53f443e-499a-4a20-b97d-0c78bea1c68a-log-httpd\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.497795 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-config-data\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.497845 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" 
(UniqueName: \"kubernetes.io/empty-dir/e53f443e-499a-4a20-b97d-0c78bea1c68a-run-httpd\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.498454 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e53f443e-499a-4a20-b97d-0c78bea1c68a-run-httpd\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.499233 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e53f443e-499a-4a20-b97d-0c78bea1c68a-log-httpd\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.507410 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-config-data\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.508058 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-scripts\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.508511 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.515608 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.516266 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.518956 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52c7h\" (UniqueName: \"kubernetes.io/projected/e53f443e-499a-4a20-b97d-0c78bea1c68a-kube-api-access-52c7h\") pod \"ceilometer-0\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.519275 5039 scope.go:117] "RemoveContainer" containerID="676ad49a9cf447ed89ee4328121b62dea57f7d5bac27fc3c01e258a256979041" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.593696 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.843011 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e","Type":"ContainerStarted","Data":"980c94e18a354f430f9f10b6299157b5dc5d1ea52788e494a02e65ce9e46078f"} Nov 24 14:30:31 crc kubenswrapper[5039]: I1124 14:30:31.869175 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=4.817392484 podStartE2EDuration="15.869158991s" podCreationTimestamp="2025-11-24 14:30:16 +0000 UTC" firstStartedPulling="2025-11-24 14:30:18.800058669 +0000 UTC m=+4331.239183169" lastFinishedPulling="2025-11-24 14:30:29.851825176 +0000 UTC m=+4342.290949676" observedRunningTime="2025-11-24 14:30:31.861866743 +0000 UTC m=+4344.300991243" watchObservedRunningTime="2025-11-24 14:30:31.869158991 +0000 UTC m=+4344.308283491" Nov 24 14:30:32 crc kubenswrapper[5039]: I1124 14:30:32.091137 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 14:30:32 crc kubenswrapper[5039]: I1124 14:30:32.319763 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34d32473-00c1-407b-b009-0d43c17038f9" path="/var/lib/kubelet/pods/34d32473-00c1-407b-b009-0d43c17038f9/volumes" Nov 24 14:30:32 crc kubenswrapper[5039]: I1124 14:30:32.320635 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3943960b-1e53-413a-9dd9-505fe98db72d" path="/var/lib/kubelet/pods/3943960b-1e53-413a-9dd9-505fe98db72d/volumes" Nov 24 14:30:32 crc kubenswrapper[5039]: I1124 14:30:32.321244 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="831e28f3-74a0-4b52-933c-1a3e7a7811f6" path="/var/lib/kubelet/pods/831e28f3-74a0-4b52-933c-1a3e7a7811f6/volumes" Nov 24 14:30:32 crc kubenswrapper[5039]: I1124 14:30:32.322735 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f956cc7-2ab6-4818-8d36-09cbd169b9b2" path="/var/lib/kubelet/pods/9f956cc7-2ab6-4818-8d36-09cbd169b9b2/volumes" Nov 24 14:30:32 crc kubenswrapper[5039]: I1124 14:30:32.340149 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6c5b658bc4-625q2" podUID="ee231063-13a0-4a14-9864-362a8459b8e7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.69:8443/dashboard/auth/login/?next=/dashboard/\": read tcp 10.217.0.2:33296->10.217.1.69:8443: read: connection reset by peer" Nov 24 14:30:32 crc kubenswrapper[5039]: I1124 14:30:32.677134 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 14:30:32 crc kubenswrapper[5039]: I1124 14:30:32.859867 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e53f443e-499a-4a20-b97d-0c78bea1c68a","Type":"ContainerStarted","Data":"36285c86c8a0988e681bded6df0b43f4b147e70e22a90416b661ed37522ae528"} Nov 24 14:30:32 crc kubenswrapper[5039]: I1124 14:30:32.859912 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e53f443e-499a-4a20-b97d-0c78bea1c68a","Type":"ContainerStarted","Data":"e4a39dee950783519f32dbf8fb1f284b381447f5fbdb51e11c7f7e5d15775659"} Nov 24 14:30:32 crc kubenswrapper[5039]: I1124 14:30:32.862547 5039 generic.go:334] "Generic (PLEG): container finished" podID="ee231063-13a0-4a14-9864-362a8459b8e7" containerID="c83ec4fbe52dbd8b6865ecf997689e3c1746c09451f88b0f608e7cd6c1b6502d" exitCode=0 Nov 24 
14:30:32 crc kubenswrapper[5039]: I1124 14:30:32.862618 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c5b658bc4-625q2" event={"ID":"ee231063-13a0-4a14-9864-362a8459b8e7","Type":"ContainerDied","Data":"c83ec4fbe52dbd8b6865ecf997689e3c1746c09451f88b0f608e7cd6c1b6502d"} Nov 24 14:30:33 crc kubenswrapper[5039]: I1124 14:30:33.971221 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6c5b658bc4-625q2" podUID="ee231063-13a0-4a14-9864-362a8459b8e7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.69:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.69:8443: connect: connection refused" Nov 24 14:30:34 crc kubenswrapper[5039]: I1124 14:30:34.896190 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e53f443e-499a-4a20-b97d-0c78bea1c68a","Type":"ContainerStarted","Data":"4f12bd009b170932a6af715ee18e177602353aa3c2622a09459957b1a8e3de46"} Nov 24 14:30:35 crc kubenswrapper[5039]: I1124 14:30:35.911273 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e53f443e-499a-4a20-b97d-0c78bea1c68a","Type":"ContainerStarted","Data":"4742a9552bde01d12b0b0208d7bb448681ebb145ddbc60a47ebb35d55e9d9006"} Nov 24 14:30:37 crc kubenswrapper[5039]: I1124 14:30:37.702312 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 24 14:30:37 crc kubenswrapper[5039]: I1124 14:30:37.939411 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e53f443e-499a-4a20-b97d-0c78bea1c68a","Type":"ContainerStarted","Data":"35e5734da9b22913e04de66868c040ba0403c2b331c6127d9a85a6cae8ebe839"} Nov 24 14:30:37 crc kubenswrapper[5039]: I1124 14:30:37.939603 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e53f443e-499a-4a20-b97d-0c78bea1c68a" containerName="ceilometer-central-agent" containerID="cri-o://36285c86c8a0988e681bded6df0b43f4b147e70e22a90416b661ed37522ae528" gracePeriod=30 Nov 24 14:30:37 crc kubenswrapper[5039]: I1124 14:30:37.939663 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e53f443e-499a-4a20-b97d-0c78bea1c68a" containerName="sg-core" containerID="cri-o://4742a9552bde01d12b0b0208d7bb448681ebb145ddbc60a47ebb35d55e9d9006" gracePeriod=30 Nov 24 14:30:37 crc kubenswrapper[5039]: I1124 14:30:37.939707 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e53f443e-499a-4a20-b97d-0c78bea1c68a" containerName="ceilometer-notification-agent" containerID="cri-o://4f12bd009b170932a6af715ee18e177602353aa3c2622a09459957b1a8e3de46" gracePeriod=30 Nov 24 14:30:37 crc kubenswrapper[5039]: I1124 14:30:37.939785 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e53f443e-499a-4a20-b97d-0c78bea1c68a" containerName="proxy-httpd" containerID="cri-o://35e5734da9b22913e04de66868c040ba0403c2b331c6127d9a85a6cae8ebe839" gracePeriod=30 Nov 24 14:30:37 crc kubenswrapper[5039]: I1124 14:30:37.939988 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 14:30:37 crc kubenswrapper[5039]: I1124 14:30:37.980393 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.179533213 
podStartE2EDuration="6.980368953s" podCreationTimestamp="2025-11-24 14:30:31 +0000 UTC" firstStartedPulling="2025-11-24 14:30:32.097571605 +0000 UTC m=+4344.536696105" lastFinishedPulling="2025-11-24 14:30:36.898407345 +0000 UTC m=+4349.337531845" observedRunningTime="2025-11-24 14:30:37.960928729 +0000 UTC m=+4350.400053229" watchObservedRunningTime="2025-11-24 14:30:37.980368953 +0000 UTC m=+4350.419493463" Nov 24 14:30:38 crc kubenswrapper[5039]: I1124 14:30:38.314235 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:30:38 crc kubenswrapper[5039]: E1124 14:30:38.314615 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:30:38 crc kubenswrapper[5039]: I1124 14:30:38.962248 5039 generic.go:334] "Generic (PLEG): container finished" podID="e53f443e-499a-4a20-b97d-0c78bea1c68a" containerID="35e5734da9b22913e04de66868c040ba0403c2b331c6127d9a85a6cae8ebe839" exitCode=0 Nov 24 14:30:38 crc kubenswrapper[5039]: I1124 14:30:38.962623 5039 generic.go:334] "Generic (PLEG): container finished" podID="e53f443e-499a-4a20-b97d-0c78bea1c68a" containerID="4742a9552bde01d12b0b0208d7bb448681ebb145ddbc60a47ebb35d55e9d9006" exitCode=2 Nov 24 14:30:38 crc kubenswrapper[5039]: I1124 14:30:38.962641 5039 generic.go:334] "Generic (PLEG): container finished" podID="e53f443e-499a-4a20-b97d-0c78bea1c68a" containerID="4f12bd009b170932a6af715ee18e177602353aa3c2622a09459957b1a8e3de46" exitCode=0 Nov 24 14:30:38 crc kubenswrapper[5039]: I1124 14:30:38.962335 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e53f443e-499a-4a20-b97d-0c78bea1c68a","Type":"ContainerDied","Data":"35e5734da9b22913e04de66868c040ba0403c2b331c6127d9a85a6cae8ebe839"} Nov 24 14:30:38 crc kubenswrapper[5039]: I1124 14:30:38.962686 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e53f443e-499a-4a20-b97d-0c78bea1c68a","Type":"ContainerDied","Data":"4742a9552bde01d12b0b0208d7bb448681ebb145ddbc60a47ebb35d55e9d9006"} Nov 24 14:30:38 crc kubenswrapper[5039]: I1124 14:30:38.962722 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e53f443e-499a-4a20-b97d-0c78bea1c68a","Type":"ContainerDied","Data":"4f12bd009b170932a6af715ee18e177602353aa3c2622a09459957b1a8e3de46"} Nov 24 14:30:39 crc kubenswrapper[5039]: I1124 14:30:39.497097 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0" Nov 24 14:30:39 crc kubenswrapper[5039]: I1124 14:30:39.564547 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-scheduler-0"] Nov 24 14:30:39 crc kubenswrapper[5039]: I1124 14:30:39.972219 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-scheduler-0" podUID="49c0901c-2c86-40c0-8f18-56b111088afa" containerName="manila-scheduler" containerID="cri-o://e3eadfa09be4e7f9da5b573003282b192cbee945628285e368cf9660be58a41f" gracePeriod=30 Nov 24 14:30:39 crc kubenswrapper[5039]: I1124 14:30:39.972299 5039 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openstack/manila-scheduler-0" podUID="49c0901c-2c86-40c0-8f18-56b111088afa" containerName="probe" containerID="cri-o://feb3ca8cb3db94a776c82aa07045c90a8c1887257d63ad579c762491e3778a36" gracePeriod=30 Nov 24 14:30:40 crc kubenswrapper[5039]: I1124 14:30:40.985947 5039 generic.go:334] "Generic (PLEG): container finished" podID="49c0901c-2c86-40c0-8f18-56b111088afa" containerID="feb3ca8cb3db94a776c82aa07045c90a8c1887257d63ad579c762491e3778a36" exitCode=0 Nov 24 14:30:40 crc kubenswrapper[5039]: I1124 14:30:40.986003 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"49c0901c-2c86-40c0-8f18-56b111088afa","Type":"ContainerDied","Data":"feb3ca8cb3db94a776c82aa07045c90a8c1887257d63ad579c762491e3778a36"} Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.010848 5039 generic.go:334] "Generic (PLEG): container finished" podID="e53f443e-499a-4a20-b97d-0c78bea1c68a" containerID="36285c86c8a0988e681bded6df0b43f4b147e70e22a90416b661ed37522ae528" exitCode=0 Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.011376 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e53f443e-499a-4a20-b97d-0c78bea1c68a","Type":"ContainerDied","Data":"36285c86c8a0988e681bded6df0b43f4b147e70e22a90416b661ed37522ae528"} Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.017388 5039 generic.go:334] "Generic (PLEG): container finished" podID="49c0901c-2c86-40c0-8f18-56b111088afa" containerID="e3eadfa09be4e7f9da5b573003282b192cbee945628285e368cf9660be58a41f" exitCode=0 Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.017434 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"49c0901c-2c86-40c0-8f18-56b111088afa","Type":"ContainerDied","Data":"e3eadfa09be4e7f9da5b573003282b192cbee945628285e368cf9660be58a41f"} Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.140252 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.312744 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-scripts\") pod \"49c0901c-2c86-40c0-8f18-56b111088afa\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.313592 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/49c0901c-2c86-40c0-8f18-56b111088afa-etc-machine-id\") pod \"49c0901c-2c86-40c0-8f18-56b111088afa\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.313644 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/49c0901c-2c86-40c0-8f18-56b111088afa-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "49c0901c-2c86-40c0-8f18-56b111088afa" (UID: "49c0901c-2c86-40c0-8f18-56b111088afa"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.313654 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-config-data-custom\") pod \"49c0901c-2c86-40c0-8f18-56b111088afa\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.313709 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-config-data\") pod \"49c0901c-2c86-40c0-8f18-56b111088afa\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.313800 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5prvk\" (UniqueName: \"kubernetes.io/projected/49c0901c-2c86-40c0-8f18-56b111088afa-kube-api-access-5prvk\") pod \"49c0901c-2c86-40c0-8f18-56b111088afa\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.313839 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-combined-ca-bundle\") pod \"49c0901c-2c86-40c0-8f18-56b111088afa\" (UID: \"49c0901c-2c86-40c0-8f18-56b111088afa\") " Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.314675 5039 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/49c0901c-2c86-40c0-8f18-56b111088afa-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.322884 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c0901c-2c86-40c0-8f18-56b111088afa-kube-api-access-5prvk" (OuterVolumeSpecName: "kube-api-access-5prvk") pod "49c0901c-2c86-40c0-8f18-56b111088afa" (UID: "49c0901c-2c86-40c0-8f18-56b111088afa"). InnerVolumeSpecName "kube-api-access-5prvk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.323638 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "49c0901c-2c86-40c0-8f18-56b111088afa" (UID: "49c0901c-2c86-40c0-8f18-56b111088afa"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.347056 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-scripts" (OuterVolumeSpecName: "scripts") pod "49c0901c-2c86-40c0-8f18-56b111088afa" (UID: "49c0901c-2c86-40c0-8f18-56b111088afa"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.384444 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "49c0901c-2c86-40c0-8f18-56b111088afa" (UID: "49c0901c-2c86-40c0-8f18-56b111088afa"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.416530 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5prvk\" (UniqueName: \"kubernetes.io/projected/49c0901c-2c86-40c0-8f18-56b111088afa-kube-api-access-5prvk\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.416554 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.416563 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.416573 5039 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.417591 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.461905 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-config-data" (OuterVolumeSpecName: "config-data") pod "49c0901c-2c86-40c0-8f18-56b111088afa" (UID: "49c0901c-2c86-40c0-8f18-56b111088afa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.518964 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c0901c-2c86-40c0-8f18-56b111088afa-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.620022 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-scripts\") pod \"e53f443e-499a-4a20-b97d-0c78bea1c68a\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.620330 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-sg-core-conf-yaml\") pod \"e53f443e-499a-4a20-b97d-0c78bea1c68a\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.621634 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-combined-ca-bundle\") pod \"e53f443e-499a-4a20-b97d-0c78bea1c68a\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.622071 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-52c7h\" (UniqueName: \"kubernetes.io/projected/e53f443e-499a-4a20-b97d-0c78bea1c68a-kube-api-access-52c7h\") pod \"e53f443e-499a-4a20-b97d-0c78bea1c68a\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.622239 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-ceilometer-tls-certs\") pod \"e53f443e-499a-4a20-b97d-0c78bea1c68a\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.622401 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e53f443e-499a-4a20-b97d-0c78bea1c68a-run-httpd\") pod \"e53f443e-499a-4a20-b97d-0c78bea1c68a\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.622613 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-config-data\") pod \"e53f443e-499a-4a20-b97d-0c78bea1c68a\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.623229 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e53f443e-499a-4a20-b97d-0c78bea1c68a-log-httpd\") pod \"e53f443e-499a-4a20-b97d-0c78bea1c68a\" (UID: \"e53f443e-499a-4a20-b97d-0c78bea1c68a\") " Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.623173 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e53f443e-499a-4a20-b97d-0c78bea1c68a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e53f443e-499a-4a20-b97d-0c78bea1c68a" (UID: "e53f443e-499a-4a20-b97d-0c78bea1c68a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.624390 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e53f443e-499a-4a20-b97d-0c78bea1c68a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e53f443e-499a-4a20-b97d-0c78bea1c68a" (UID: "e53f443e-499a-4a20-b97d-0c78bea1c68a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.677759 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-scripts" (OuterVolumeSpecName: "scripts") pod "e53f443e-499a-4a20-b97d-0c78bea1c68a" (UID: "e53f443e-499a-4a20-b97d-0c78bea1c68a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.684645 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e53f443e-499a-4a20-b97d-0c78bea1c68a-kube-api-access-52c7h" (OuterVolumeSpecName: "kube-api-access-52c7h") pod "e53f443e-499a-4a20-b97d-0c78bea1c68a" (UID: "e53f443e-499a-4a20-b97d-0c78bea1c68a"). InnerVolumeSpecName "kube-api-access-52c7h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.694145 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e53f443e-499a-4a20-b97d-0c78bea1c68a" (UID: "e53f443e-499a-4a20-b97d-0c78bea1c68a"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.714146 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "e53f443e-499a-4a20-b97d-0c78bea1c68a" (UID: "e53f443e-499a-4a20-b97d-0c78bea1c68a"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.725417 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.725452 5039 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.725466 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-52c7h\" (UniqueName: \"kubernetes.io/projected/e53f443e-499a-4a20-b97d-0c78bea1c68a-kube-api-access-52c7h\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.725477 5039 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.725491 5039 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e53f443e-499a-4a20-b97d-0c78bea1c68a-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.725521 5039 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e53f443e-499a-4a20-b97d-0c78bea1c68a-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.771486 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e53f443e-499a-4a20-b97d-0c78bea1c68a" (UID: "e53f443e-499a-4a20-b97d-0c78bea1c68a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.809647 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-config-data" (OuterVolumeSpecName: "config-data") pod "e53f443e-499a-4a20-b97d-0c78bea1c68a" (UID: "e53f443e-499a-4a20-b97d-0c78bea1c68a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.827333 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:42 crc kubenswrapper[5039]: I1124 14:30:42.827363 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e53f443e-499a-4a20-b97d-0c78bea1c68a-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.043862 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"49c0901c-2c86-40c0-8f18-56b111088afa","Type":"ContainerDied","Data":"2d04ad9cb5bfc6e70cab87d6f0120b59124dcb2bcd62267631688aa2c21f1729"} Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.043931 5039 scope.go:117] "RemoveContainer" containerID="feb3ca8cb3db94a776c82aa07045c90a8c1887257d63ad579c762491e3778a36" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.044108 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.049109 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e53f443e-499a-4a20-b97d-0c78bea1c68a","Type":"ContainerDied","Data":"e4a39dee950783519f32dbf8fb1f284b381447f5fbdb51e11c7f7e5d15775659"} Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.049282 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.080401 5039 scope.go:117] "RemoveContainer" containerID="e3eadfa09be4e7f9da5b573003282b192cbee945628285e368cf9660be58a41f" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.103167 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-scheduler-0"] Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.120825 5039 scope.go:117] "RemoveContainer" containerID="35e5734da9b22913e04de66868c040ba0403c2b331c6127d9a85a6cae8ebe839" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.124707 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-scheduler-0"] Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.136381 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.147170 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.152545 5039 scope.go:117] "RemoveContainer" containerID="4742a9552bde01d12b0b0208d7bb448681ebb145ddbc60a47ebb35d55e9d9006" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.156646 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"] Nov 24 14:30:43 crc kubenswrapper[5039]: E1124 14:30:43.157167 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49c0901c-2c86-40c0-8f18-56b111088afa" containerName="probe" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.157190 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="49c0901c-2c86-40c0-8f18-56b111088afa" containerName="probe" Nov 24 14:30:43 crc kubenswrapper[5039]: E1124 14:30:43.157213 5039 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="e53f443e-499a-4a20-b97d-0c78bea1c68a" containerName="ceilometer-central-agent" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.157223 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="e53f443e-499a-4a20-b97d-0c78bea1c68a" containerName="ceilometer-central-agent" Nov 24 14:30:43 crc kubenswrapper[5039]: E1124 14:30:43.157242 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e53f443e-499a-4a20-b97d-0c78bea1c68a" containerName="proxy-httpd" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.157251 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="e53f443e-499a-4a20-b97d-0c78bea1c68a" containerName="proxy-httpd" Nov 24 14:30:43 crc kubenswrapper[5039]: E1124 14:30:43.157278 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e53f443e-499a-4a20-b97d-0c78bea1c68a" containerName="ceilometer-notification-agent" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.157287 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="e53f443e-499a-4a20-b97d-0c78bea1c68a" containerName="ceilometer-notification-agent" Nov 24 14:30:43 crc kubenswrapper[5039]: E1124 14:30:43.157353 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e53f443e-499a-4a20-b97d-0c78bea1c68a" containerName="sg-core" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.157361 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="e53f443e-499a-4a20-b97d-0c78bea1c68a" containerName="sg-core" Nov 24 14:30:43 crc kubenswrapper[5039]: E1124 14:30:43.157397 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49c0901c-2c86-40c0-8f18-56b111088afa" containerName="manila-scheduler" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.157410 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="49c0901c-2c86-40c0-8f18-56b111088afa" containerName="manila-scheduler" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.157669 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="e53f443e-499a-4a20-b97d-0c78bea1c68a" containerName="proxy-httpd" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.157691 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="49c0901c-2c86-40c0-8f18-56b111088afa" containerName="manila-scheduler" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.157713 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="e53f443e-499a-4a20-b97d-0c78bea1c68a" containerName="ceilometer-notification-agent" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.157727 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="e53f443e-499a-4a20-b97d-0c78bea1c68a" containerName="ceilometer-central-agent" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.157737 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="e53f443e-499a-4a20-b97d-0c78bea1c68a" containerName="sg-core" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.157746 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="49c0901c-2c86-40c0-8f18-56b111088afa" containerName="probe" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.158908 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.161091 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.165781 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.179542 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.188402 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.194193 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.194848 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.195738 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.196316 5039 scope.go:117] "RemoveContainer" containerID="4f12bd009b170932a6af715ee18e177602353aa3c2622a09459957b1a8e3de46" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.196485 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.225207 5039 scope.go:117] "RemoveContainer" containerID="36285c86c8a0988e681bded6df0b43f4b147e70e22a90416b661ed37522ae528" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.241874 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f957236-16e6-45c4-8174-b20f69df4ecb-config-data\") pod \"manila-scheduler-0\" (UID: \"8f957236-16e6-45c4-8174-b20f69df4ecb\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.241931 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f957236-16e6-45c4-8174-b20f69df4ecb-scripts\") pod \"manila-scheduler-0\" (UID: \"8f957236-16e6-45c4-8174-b20f69df4ecb\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.241954 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-run-httpd\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.241983 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8f957236-16e6-45c4-8174-b20f69df4ecb-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"8f957236-16e6-45c4-8174-b20f69df4ecb\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.242009 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f957236-16e6-45c4-8174-b20f69df4ecb-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: 
\"8f957236-16e6-45c4-8174-b20f69df4ecb\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.242053 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-log-httpd\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.242070 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.242119 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8f957236-16e6-45c4-8174-b20f69df4ecb-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"8f957236-16e6-45c4-8174-b20f69df4ecb\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.242135 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bndw8\" (UniqueName: \"kubernetes.io/projected/8f957236-16e6-45c4-8174-b20f69df4ecb-kube-api-access-bndw8\") pod \"manila-scheduler-0\" (UID: \"8f957236-16e6-45c4-8174-b20f69df4ecb\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.242167 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.242197 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.242217 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-scripts\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.242241 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7n4n\" (UniqueName: \"kubernetes.io/projected/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-kube-api-access-r7n4n\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.242266 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-config-data\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 
14:30:43.344630 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.345125 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-scripts\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.345171 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7n4n\" (UniqueName: \"kubernetes.io/projected/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-kube-api-access-r7n4n\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.345438 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-config-data\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.345564 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f957236-16e6-45c4-8174-b20f69df4ecb-config-data\") pod \"manila-scheduler-0\" (UID: \"8f957236-16e6-45c4-8174-b20f69df4ecb\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.345627 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f957236-16e6-45c4-8174-b20f69df4ecb-scripts\") pod \"manila-scheduler-0\" (UID: \"8f957236-16e6-45c4-8174-b20f69df4ecb\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.345650 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-run-httpd\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.345689 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8f957236-16e6-45c4-8174-b20f69df4ecb-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"8f957236-16e6-45c4-8174-b20f69df4ecb\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.345729 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f957236-16e6-45c4-8174-b20f69df4ecb-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"8f957236-16e6-45c4-8174-b20f69df4ecb\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.345921 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-log-httpd\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc 
kubenswrapper[5039]: I1124 14:30:43.345993 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.346177 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8f957236-16e6-45c4-8174-b20f69df4ecb-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"8f957236-16e6-45c4-8174-b20f69df4ecb\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.346210 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bndw8\" (UniqueName: \"kubernetes.io/projected/8f957236-16e6-45c4-8174-b20f69df4ecb-kube-api-access-bndw8\") pod \"manila-scheduler-0\" (UID: \"8f957236-16e6-45c4-8174-b20f69df4ecb\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.346280 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.346359 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-log-httpd\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.346454 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-run-httpd\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.346559 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8f957236-16e6-45c4-8174-b20f69df4ecb-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"8f957236-16e6-45c4-8174-b20f69df4ecb\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.350609 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.351163 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.351581 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f957236-16e6-45c4-8174-b20f69df4ecb-scripts\") pod \"manila-scheduler-0\" (UID: \"8f957236-16e6-45c4-8174-b20f69df4ecb\") " pod="openstack/manila-scheduler-0" Nov 
24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.352212 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.352371 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f957236-16e6-45c4-8174-b20f69df4ecb-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"8f957236-16e6-45c4-8174-b20f69df4ecb\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.353028 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8f957236-16e6-45c4-8174-b20f69df4ecb-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"8f957236-16e6-45c4-8174-b20f69df4ecb\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.354145 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f957236-16e6-45c4-8174-b20f69df4ecb-config-data\") pod \"manila-scheduler-0\" (UID: \"8f957236-16e6-45c4-8174-b20f69df4ecb\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.357127 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-scripts\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.359137 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-config-data\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.369163 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bndw8\" (UniqueName: \"kubernetes.io/projected/8f957236-16e6-45c4-8174-b20f69df4ecb-kube-api-access-bndw8\") pod \"manila-scheduler-0\" (UID: \"8f957236-16e6-45c4-8174-b20f69df4ecb\") " pod="openstack/manila-scheduler-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.369476 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7n4n\" (UniqueName: \"kubernetes.io/projected/066b84eb-20a0-4d2a-b970-6a4419ac3dcc-kube-api-access-r7n4n\") pod \"ceilometer-0\" (UID: \"066b84eb-20a0-4d2a-b970-6a4419ac3dcc\") " pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.484129 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.514804 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.970583 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6c5b658bc4-625q2" podUID="ee231063-13a0-4a14-9864-362a8459b8e7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.69:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.69:8443: connect: connection refused" Nov 24 14:30:43 crc kubenswrapper[5039]: I1124 14:30:43.982809 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 24 14:30:44 crc kubenswrapper[5039]: I1124 14:30:44.067731 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"8f957236-16e6-45c4-8174-b20f69df4ecb","Type":"ContainerStarted","Data":"51dc6358ad5b89ff5dd97bf249baf91257c62ae76b7b8c1995ca7a209fab10da"} Nov 24 14:30:44 crc kubenswrapper[5039]: I1124 14:30:44.110617 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 14:30:44 crc kubenswrapper[5039]: W1124 14:30:44.123787 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod066b84eb_20a0_4d2a_b970_6a4419ac3dcc.slice/crio-8b17314ab6f51db136a33a8116355513ce7fb017f843da0afdae2a0ba87e49e5 WatchSource:0}: Error finding container 8b17314ab6f51db136a33a8116355513ce7fb017f843da0afdae2a0ba87e49e5: Status 404 returned error can't find the container with id 8b17314ab6f51db136a33a8116355513ce7fb017f843da0afdae2a0ba87e49e5 Nov 24 14:30:44 crc kubenswrapper[5039]: I1124 14:30:44.323203 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c0901c-2c86-40c0-8f18-56b111088afa" path="/var/lib/kubelet/pods/49c0901c-2c86-40c0-8f18-56b111088afa/volumes" Nov 24 14:30:44 crc kubenswrapper[5039]: I1124 14:30:44.324205 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e53f443e-499a-4a20-b97d-0c78bea1c68a" path="/var/lib/kubelet/pods/e53f443e-499a-4a20-b97d-0c78bea1c68a/volumes" Nov 24 14:30:45 crc kubenswrapper[5039]: I1124 14:30:45.094628 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"8f957236-16e6-45c4-8174-b20f69df4ecb","Type":"ContainerStarted","Data":"a5daa1329cad0048206086ee34fe546457d93d22bc384f4389cb9ffa1f03e19e"} Nov 24 14:30:45 crc kubenswrapper[5039]: I1124 14:30:45.095136 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"8f957236-16e6-45c4-8174-b20f69df4ecb","Type":"ContainerStarted","Data":"39876f720e04bca8a71af80223a784504dfb16c4f382642e75c2becb258ef533"} Nov 24 14:30:45 crc kubenswrapper[5039]: I1124 14:30:45.102418 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"066b84eb-20a0-4d2a-b970-6a4419ac3dcc","Type":"ContainerStarted","Data":"faeab757c58c4714be08920a6fb141c59e89898260c923a633f7c3dd9b3dd3c6"} Nov 24 14:30:45 crc kubenswrapper[5039]: I1124 14:30:45.102470 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"066b84eb-20a0-4d2a-b970-6a4419ac3dcc","Type":"ContainerStarted","Data":"8b17314ab6f51db136a33a8116355513ce7fb017f843da0afdae2a0ba87e49e5"} Nov 24 14:30:45 crc kubenswrapper[5039]: I1124 14:30:45.126253 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=2.126234589 podStartE2EDuration="2.126234589s" 
podCreationTimestamp="2025-11-24 14:30:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 14:30:45.116208304 +0000 UTC m=+4357.555332804" watchObservedRunningTime="2025-11-24 14:30:45.126234589 +0000 UTC m=+4357.565359089" Nov 24 14:30:45 crc kubenswrapper[5039]: I1124 14:30:45.655328 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/manila-api-0" Nov 24 14:30:46 crc kubenswrapper[5039]: I1124 14:30:46.117337 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"066b84eb-20a0-4d2a-b970-6a4419ac3dcc","Type":"ContainerStarted","Data":"ab72927d563f6dbf1e5e7ab198629ba1619d6a3499b1bbfbd3af570a23468084"} Nov 24 14:30:47 crc kubenswrapper[5039]: I1124 14:30:47.136771 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"066b84eb-20a0-4d2a-b970-6a4419ac3dcc","Type":"ContainerStarted","Data":"d60bdc33002634ea282a3341df32f538157a936475f4b826c44c52fefe7ede44"} Nov 24 14:30:49 crc kubenswrapper[5039]: I1124 14:30:49.163985 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"066b84eb-20a0-4d2a-b970-6a4419ac3dcc","Type":"ContainerStarted","Data":"d763b4d4b57703b3be25f9eb9feaefcab2005159519e33b65e1c5d0f279ac02a"} Nov 24 14:30:49 crc kubenswrapper[5039]: I1124 14:30:49.164496 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 14:30:49 crc kubenswrapper[5039]: I1124 14:30:49.197436 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.212001636 podStartE2EDuration="6.197413342s" podCreationTimestamp="2025-11-24 14:30:43 +0000 UTC" firstStartedPulling="2025-11-24 14:30:44.128673593 +0000 UTC m=+4356.567798083" lastFinishedPulling="2025-11-24 14:30:48.114085279 +0000 UTC m=+4360.553209789" observedRunningTime="2025-11-24 14:30:49.187170431 +0000 UTC m=+4361.626294931" watchObservedRunningTime="2025-11-24 14:30:49.197413342 +0000 UTC m=+4361.636537842" Nov 24 14:30:49 crc kubenswrapper[5039]: I1124 14:30:49.375705 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0" Nov 24 14:30:49 crc kubenswrapper[5039]: I1124 14:30:49.419305 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-share-share1-0"] Nov 24 14:30:50 crc kubenswrapper[5039]: I1124 14:30:50.175617 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-share-share1-0" podUID="0e10bdf5-f36b-4b1d-b0ae-2304b07be38e" containerName="manila-share" containerID="cri-o://aa3606c8a46dac0ec9243ac17583744721466d005f0f31c3fbcc832b3e7254d4" gracePeriod=30 Nov 24 14:30:50 crc kubenswrapper[5039]: I1124 14:30:50.175703 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-share-share1-0" podUID="0e10bdf5-f36b-4b1d-b0ae-2304b07be38e" containerName="probe" containerID="cri-o://980c94e18a354f430f9f10b6299157b5dc5d1ea52788e494a02e65ce9e46078f" gracePeriod=30 Nov 24 14:30:50 crc kubenswrapper[5039]: I1124 14:30:50.306897 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:30:50 crc kubenswrapper[5039]: E1124 14:30:50.307219 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.193111 5039 generic.go:334] "Generic (PLEG): container finished" podID="0e10bdf5-f36b-4b1d-b0ae-2304b07be38e" containerID="980c94e18a354f430f9f10b6299157b5dc5d1ea52788e494a02e65ce9e46078f" exitCode=0 Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.193457 5039 generic.go:334] "Generic (PLEG): container finished" podID="0e10bdf5-f36b-4b1d-b0ae-2304b07be38e" containerID="aa3606c8a46dac0ec9243ac17583744721466d005f0f31c3fbcc832b3e7254d4" exitCode=1 Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.193184 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e","Type":"ContainerDied","Data":"980c94e18a354f430f9f10b6299157b5dc5d1ea52788e494a02e65ce9e46078f"} Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.193494 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e","Type":"ContainerDied","Data":"aa3606c8a46dac0ec9243ac17583744721466d005f0f31c3fbcc832b3e7254d4"} Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.401329 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.501648 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-var-lib-manila\") pod \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.501875 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-config-data\") pod \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.501982 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fc9kh\" (UniqueName: \"kubernetes.io/projected/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-kube-api-access-fc9kh\") pod \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.502144 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-combined-ca-bundle\") pod \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.502324 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-ceph\") pod \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.502449 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-etc-machine-id\") pod \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.502629 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-scripts\") pod \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.503863 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-config-data-custom\") pod \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\" (UID: \"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e\") " Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.502901 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "0e10bdf5-f36b-4b1d-b0ae-2304b07be38e" (UID: "0e10bdf5-f36b-4b1d-b0ae-2304b07be38e"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.502933 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-var-lib-manila" (OuterVolumeSpecName: "var-lib-manila") pod "0e10bdf5-f36b-4b1d-b0ae-2304b07be38e" (UID: "0e10bdf5-f36b-4b1d-b0ae-2304b07be38e"). InnerVolumeSpecName "var-lib-manila". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.509025 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0e10bdf5-f36b-4b1d-b0ae-2304b07be38e" (UID: "0e10bdf5-f36b-4b1d-b0ae-2304b07be38e"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.509612 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-scripts" (OuterVolumeSpecName: "scripts") pod "0e10bdf5-f36b-4b1d-b0ae-2304b07be38e" (UID: "0e10bdf5-f36b-4b1d-b0ae-2304b07be38e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.511078 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-kube-api-access-fc9kh" (OuterVolumeSpecName: "kube-api-access-fc9kh") pod "0e10bdf5-f36b-4b1d-b0ae-2304b07be38e" (UID: "0e10bdf5-f36b-4b1d-b0ae-2304b07be38e"). InnerVolumeSpecName "kube-api-access-fc9kh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.511588 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-ceph" (OuterVolumeSpecName: "ceph") pod "0e10bdf5-f36b-4b1d-b0ae-2304b07be38e" (UID: "0e10bdf5-f36b-4b1d-b0ae-2304b07be38e"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.583146 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0e10bdf5-f36b-4b1d-b0ae-2304b07be38e" (UID: "0e10bdf5-f36b-4b1d-b0ae-2304b07be38e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.607949 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.607991 5039 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-ceph\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.608008 5039 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.608019 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.608032 5039 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.608043 5039 reconciler_common.go:293] "Volume detached for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-var-lib-manila\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.608054 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fc9kh\" (UniqueName: \"kubernetes.io/projected/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-kube-api-access-fc9kh\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.635737 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-config-data" (OuterVolumeSpecName: "config-data") pod "0e10bdf5-f36b-4b1d-b0ae-2304b07be38e" (UID: "0e10bdf5-f36b-4b1d-b0ae-2304b07be38e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:51 crc kubenswrapper[5039]: I1124 14:30:51.710380 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.208333 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"0e10bdf5-f36b-4b1d-b0ae-2304b07be38e","Type":"ContainerDied","Data":"e02ad467d3a73621882a69a2f9043c6b8ba9275c53c100438deb5f39c5a047fb"} Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.208639 5039 scope.go:117] "RemoveContainer" containerID="980c94e18a354f430f9f10b6299157b5dc5d1ea52788e494a02e65ce9e46078f" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.208419 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.256137 5039 scope.go:117] "RemoveContainer" containerID="aa3606c8a46dac0ec9243ac17583744721466d005f0f31c3fbcc832b3e7254d4" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.276211 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-share-share1-0"] Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.305696 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-share-share1-0"] Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.326597 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e10bdf5-f36b-4b1d-b0ae-2304b07be38e" path="/var/lib/kubelet/pods/0e10bdf5-f36b-4b1d-b0ae-2304b07be38e/volumes" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.327251 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"] Nov 24 14:30:52 crc kubenswrapper[5039]: E1124 14:30:52.328372 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e10bdf5-f36b-4b1d-b0ae-2304b07be38e" containerName="probe" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.328393 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e10bdf5-f36b-4b1d-b0ae-2304b07be38e" containerName="probe" Nov 24 14:30:52 crc kubenswrapper[5039]: E1124 14:30:52.328411 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e10bdf5-f36b-4b1d-b0ae-2304b07be38e" containerName="manila-share" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.328418 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e10bdf5-f36b-4b1d-b0ae-2304b07be38e" containerName="manila-share" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.328661 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e10bdf5-f36b-4b1d-b0ae-2304b07be38e" containerName="probe" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.328683 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e10bdf5-f36b-4b1d-b0ae-2304b07be38e" containerName="manila-share" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.330067 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.334911 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.340937 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.430944 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/48847c7a-e55f-4a84-8448-89447c762f34-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.430991 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48847c7a-e55f-4a84-8448-89447c762f34-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.431079 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48847c7a-e55f-4a84-8448-89447c762f34-config-data\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.431100 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/48847c7a-e55f-4a84-8448-89447c762f34-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.431128 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/48847c7a-e55f-4a84-8448-89447c762f34-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.431143 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48847c7a-e55f-4a84-8448-89447c762f34-scripts\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.431202 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/48847c7a-e55f-4a84-8448-89447c762f34-ceph\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.431228 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkl5q\" (UniqueName: \"kubernetes.io/projected/48847c7a-e55f-4a84-8448-89447c762f34-kube-api-access-fkl5q\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc 
kubenswrapper[5039]: I1124 14:30:52.532668 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/48847c7a-e55f-4a84-8448-89447c762f34-ceph\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.532725 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkl5q\" (UniqueName: \"kubernetes.io/projected/48847c7a-e55f-4a84-8448-89447c762f34-kube-api-access-fkl5q\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.532796 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/48847c7a-e55f-4a84-8448-89447c762f34-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.532817 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48847c7a-e55f-4a84-8448-89447c762f34-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.532895 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48847c7a-e55f-4a84-8448-89447c762f34-config-data\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.532910 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/48847c7a-e55f-4a84-8448-89447c762f34-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.532938 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/48847c7a-e55f-4a84-8448-89447c762f34-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.532954 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48847c7a-e55f-4a84-8448-89447c762f34-scripts\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.533615 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/48847c7a-e55f-4a84-8448-89447c762f34-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.533747 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: 
\"kubernetes.io/host-path/48847c7a-e55f-4a84-8448-89447c762f34-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.537283 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48847c7a-e55f-4a84-8448-89447c762f34-scripts\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.537702 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/48847c7a-e55f-4a84-8448-89447c762f34-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.539045 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48847c7a-e55f-4a84-8448-89447c762f34-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.540198 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/48847c7a-e55f-4a84-8448-89447c762f34-ceph\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.555934 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkl5q\" (UniqueName: \"kubernetes.io/projected/48847c7a-e55f-4a84-8448-89447c762f34-kube-api-access-fkl5q\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.577819 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48847c7a-e55f-4a84-8448-89447c762f34-config-data\") pod \"manila-share-share1-0\" (UID: \"48847c7a-e55f-4a84-8448-89447c762f34\") " pod="openstack/manila-share-share1-0" Nov 24 14:30:52 crc kubenswrapper[5039]: I1124 14:30:52.665556 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-share-share1-0" Nov 24 14:30:53 crc kubenswrapper[5039]: I1124 14:30:53.290783 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 24 14:30:53 crc kubenswrapper[5039]: I1124 14:30:53.484876 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0" Nov 24 14:30:53 crc kubenswrapper[5039]: I1124 14:30:53.970903 5039 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6c5b658bc4-625q2" podUID="ee231063-13a0-4a14-9864-362a8459b8e7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.69:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.69:8443: connect: connection refused" Nov 24 14:30:54 crc kubenswrapper[5039]: I1124 14:30:54.239570 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"48847c7a-e55f-4a84-8448-89447c762f34","Type":"ContainerStarted","Data":"777992d5d3c24a19cd577e1b4dfb4ef2180b54c5f56823ba4a0ebc875c7b9d22"} Nov 24 14:30:54 crc kubenswrapper[5039]: I1124 14:30:54.239983 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"48847c7a-e55f-4a84-8448-89447c762f34","Type":"ContainerStarted","Data":"1e84d36e346eacd9e8220fc6f18907c86e18ef72c17f580be3f7dc26d6856f93"} Nov 24 14:30:54 crc kubenswrapper[5039]: I1124 14:30:54.332692 5039 scope.go:117] "RemoveContainer" containerID="15a0e18f432a7fbbb3d42a409ba29d73bfd79f8aeda2d3cc5fd265c1dd70a2f8" Nov 24 14:30:55 crc kubenswrapper[5039]: I1124 14:30:55.256369 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"48847c7a-e55f-4a84-8448-89447c762f34","Type":"ContainerStarted","Data":"cb96474d53747dbeb9b5686586aa8097ee530c3070bb6d14c4e65ee2377abcba"} Nov 24 14:30:55 crc kubenswrapper[5039]: I1124 14:30:55.281968 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=3.281950002 podStartE2EDuration="3.281950002s" podCreationTimestamp="2025-11-24 14:30:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 14:30:55.277122454 +0000 UTC m=+4367.716246974" watchObservedRunningTime="2025-11-24 14:30:55.281950002 +0000 UTC m=+4367.721074512" Nov 24 14:30:59 crc kubenswrapper[5039]: I1124 14:30:59.317601 5039 generic.go:334] "Generic (PLEG): container finished" podID="ee231063-13a0-4a14-9864-362a8459b8e7" containerID="d49ed4e675c018d6675328a5efa7109f8ba13218f9a5be6b7962ff27a851bccd" exitCode=137 Nov 24 14:30:59 crc kubenswrapper[5039]: I1124 14:30:59.317733 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c5b658bc4-625q2" event={"ID":"ee231063-13a0-4a14-9864-362a8459b8e7","Type":"ContainerDied","Data":"d49ed4e675c018d6675328a5efa7109f8ba13218f9a5be6b7962ff27a851bccd"} Nov 24 14:30:59 crc kubenswrapper[5039]: I1124 14:30:59.806753 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:30:59 crc kubenswrapper[5039]: I1124 14:30:59.903587 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee231063-13a0-4a14-9864-362a8459b8e7-horizon-tls-certs\") pod \"ee231063-13a0-4a14-9864-362a8459b8e7\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " Nov 24 14:30:59 crc kubenswrapper[5039]: I1124 14:30:59.903650 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee231063-13a0-4a14-9864-362a8459b8e7-logs\") pod \"ee231063-13a0-4a14-9864-362a8459b8e7\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " Nov 24 14:30:59 crc kubenswrapper[5039]: I1124 14:30:59.903819 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ee231063-13a0-4a14-9864-362a8459b8e7-scripts\") pod \"ee231063-13a0-4a14-9864-362a8459b8e7\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " Nov 24 14:30:59 crc kubenswrapper[5039]: I1124 14:30:59.903853 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ee231063-13a0-4a14-9864-362a8459b8e7-horizon-secret-key\") pod \"ee231063-13a0-4a14-9864-362a8459b8e7\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " Nov 24 14:30:59 crc kubenswrapper[5039]: I1124 14:30:59.904046 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee231063-13a0-4a14-9864-362a8459b8e7-combined-ca-bundle\") pod \"ee231063-13a0-4a14-9864-362a8459b8e7\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " Nov 24 14:30:59 crc kubenswrapper[5039]: I1124 14:30:59.904099 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ee231063-13a0-4a14-9864-362a8459b8e7-config-data\") pod \"ee231063-13a0-4a14-9864-362a8459b8e7\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " Nov 24 14:30:59 crc kubenswrapper[5039]: I1124 14:30:59.904171 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dcv96\" (UniqueName: \"kubernetes.io/projected/ee231063-13a0-4a14-9864-362a8459b8e7-kube-api-access-dcv96\") pod \"ee231063-13a0-4a14-9864-362a8459b8e7\" (UID: \"ee231063-13a0-4a14-9864-362a8459b8e7\") " Nov 24 14:30:59 crc kubenswrapper[5039]: I1124 14:30:59.904409 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee231063-13a0-4a14-9864-362a8459b8e7-logs" (OuterVolumeSpecName: "logs") pod "ee231063-13a0-4a14-9864-362a8459b8e7" (UID: "ee231063-13a0-4a14-9864-362a8459b8e7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:30:59 crc kubenswrapper[5039]: I1124 14:30:59.905316 5039 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee231063-13a0-4a14-9864-362a8459b8e7-logs\") on node \"crc\" DevicePath \"\"" Nov 24 14:30:59 crc kubenswrapper[5039]: I1124 14:30:59.908985 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee231063-13a0-4a14-9864-362a8459b8e7-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "ee231063-13a0-4a14-9864-362a8459b8e7" (UID: "ee231063-13a0-4a14-9864-362a8459b8e7"). 
InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:59 crc kubenswrapper[5039]: I1124 14:30:59.913764 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee231063-13a0-4a14-9864-362a8459b8e7-kube-api-access-dcv96" (OuterVolumeSpecName: "kube-api-access-dcv96") pod "ee231063-13a0-4a14-9864-362a8459b8e7" (UID: "ee231063-13a0-4a14-9864-362a8459b8e7"). InnerVolumeSpecName "kube-api-access-dcv96". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:30:59 crc kubenswrapper[5039]: I1124 14:30:59.935955 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee231063-13a0-4a14-9864-362a8459b8e7-scripts" (OuterVolumeSpecName: "scripts") pod "ee231063-13a0-4a14-9864-362a8459b8e7" (UID: "ee231063-13a0-4a14-9864-362a8459b8e7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 14:30:59 crc kubenswrapper[5039]: I1124 14:30:59.942740 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee231063-13a0-4a14-9864-362a8459b8e7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ee231063-13a0-4a14-9864-362a8459b8e7" (UID: "ee231063-13a0-4a14-9864-362a8459b8e7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:30:59 crc kubenswrapper[5039]: I1124 14:30:59.942860 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee231063-13a0-4a14-9864-362a8459b8e7-config-data" (OuterVolumeSpecName: "config-data") pod "ee231063-13a0-4a14-9864-362a8459b8e7" (UID: "ee231063-13a0-4a14-9864-362a8459b8e7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 14:30:59 crc kubenswrapper[5039]: I1124 14:30:59.971172 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee231063-13a0-4a14-9864-362a8459b8e7-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "ee231063-13a0-4a14-9864-362a8459b8e7" (UID: "ee231063-13a0-4a14-9864-362a8459b8e7"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:31:00 crc kubenswrapper[5039]: I1124 14:31:00.006994 5039 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ee231063-13a0-4a14-9864-362a8459b8e7-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 14:31:00 crc kubenswrapper[5039]: I1124 14:31:00.007028 5039 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ee231063-13a0-4a14-9864-362a8459b8e7-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 24 14:31:00 crc kubenswrapper[5039]: I1124 14:31:00.007039 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee231063-13a0-4a14-9864-362a8459b8e7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 14:31:00 crc kubenswrapper[5039]: I1124 14:31:00.007048 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ee231063-13a0-4a14-9864-362a8459b8e7-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 14:31:00 crc kubenswrapper[5039]: I1124 14:31:00.007056 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dcv96\" (UniqueName: \"kubernetes.io/projected/ee231063-13a0-4a14-9864-362a8459b8e7-kube-api-access-dcv96\") on node \"crc\" DevicePath \"\"" Nov 24 14:31:00 crc kubenswrapper[5039]: I1124 14:31:00.007069 5039 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee231063-13a0-4a14-9864-362a8459b8e7-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 14:31:00 crc kubenswrapper[5039]: I1124 14:31:00.331934 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c5b658bc4-625q2" event={"ID":"ee231063-13a0-4a14-9864-362a8459b8e7","Type":"ContainerDied","Data":"6d627112dc2f3d89e3514506b0417ae80dd3074af6499c36c11a54c6fcffcf51"} Nov 24 14:31:00 crc kubenswrapper[5039]: I1124 14:31:00.332024 5039 scope.go:117] "RemoveContainer" containerID="c83ec4fbe52dbd8b6865ecf997689e3c1746c09451f88b0f608e7cd6c1b6502d" Nov 24 14:31:00 crc kubenswrapper[5039]: I1124 14:31:00.332159 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6c5b658bc4-625q2" Nov 24 14:31:00 crc kubenswrapper[5039]: I1124 14:31:00.369372 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6c5b658bc4-625q2"] Nov 24 14:31:00 crc kubenswrapper[5039]: I1124 14:31:00.379656 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-6c5b658bc4-625q2"] Nov 24 14:31:00 crc kubenswrapper[5039]: I1124 14:31:00.521638 5039 scope.go:117] "RemoveContainer" containerID="d49ed4e675c018d6675328a5efa7109f8ba13218f9a5be6b7962ff27a851bccd" Nov 24 14:31:02 crc kubenswrapper[5039]: I1124 14:31:02.308257 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:31:02 crc kubenswrapper[5039]: E1124 14:31:02.308955 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:31:02 crc kubenswrapper[5039]: I1124 14:31:02.322177 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee231063-13a0-4a14-9864-362a8459b8e7" path="/var/lib/kubelet/pods/ee231063-13a0-4a14-9864-362a8459b8e7/volumes" Nov 24 14:31:02 crc kubenswrapper[5039]: I1124 14:31:02.666651 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 24 14:31:05 crc kubenswrapper[5039]: I1124 14:31:05.015703 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0" Nov 24 14:31:13 crc kubenswrapper[5039]: I1124 14:31:13.538913 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 24 14:31:14 crc kubenswrapper[5039]: I1124 14:31:14.208357 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0" Nov 24 14:31:17 crc kubenswrapper[5039]: I1124 14:31:17.307763 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:31:17 crc kubenswrapper[5039]: E1124 14:31:17.308530 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:31:30 crc kubenswrapper[5039]: I1124 14:31:30.999111 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bkhz2"] Nov 24 14:31:31 crc kubenswrapper[5039]: E1124 14:31:31.000677 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee231063-13a0-4a14-9864-362a8459b8e7" containerName="horizon" Nov 24 14:31:31 crc kubenswrapper[5039]: I1124 14:31:31.000699 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee231063-13a0-4a14-9864-362a8459b8e7" containerName="horizon" Nov 24 14:31:31 crc kubenswrapper[5039]: E1124 14:31:31.000759 5039 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="ee231063-13a0-4a14-9864-362a8459b8e7" containerName="horizon-log" Nov 24 14:31:31 crc kubenswrapper[5039]: I1124 14:31:31.000772 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee231063-13a0-4a14-9864-362a8459b8e7" containerName="horizon-log" Nov 24 14:31:31 crc kubenswrapper[5039]: I1124 14:31:31.001125 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee231063-13a0-4a14-9864-362a8459b8e7" containerName="horizon" Nov 24 14:31:31 crc kubenswrapper[5039]: I1124 14:31:31.001176 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee231063-13a0-4a14-9864-362a8459b8e7" containerName="horizon-log" Nov 24 14:31:31 crc kubenswrapper[5039]: I1124 14:31:31.003929 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bkhz2" Nov 24 14:31:31 crc kubenswrapper[5039]: I1124 14:31:31.012951 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bkhz2"] Nov 24 14:31:31 crc kubenswrapper[5039]: I1124 14:31:31.185330 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ld2pq\" (UniqueName: \"kubernetes.io/projected/36fbfba2-65ab-44a5-9e69-0f6c67426f55-kube-api-access-ld2pq\") pod \"redhat-operators-bkhz2\" (UID: \"36fbfba2-65ab-44a5-9e69-0f6c67426f55\") " pod="openshift-marketplace/redhat-operators-bkhz2" Nov 24 14:31:31 crc kubenswrapper[5039]: I1124 14:31:31.185399 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36fbfba2-65ab-44a5-9e69-0f6c67426f55-catalog-content\") pod \"redhat-operators-bkhz2\" (UID: \"36fbfba2-65ab-44a5-9e69-0f6c67426f55\") " pod="openshift-marketplace/redhat-operators-bkhz2" Nov 24 14:31:31 crc kubenswrapper[5039]: I1124 14:31:31.185577 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36fbfba2-65ab-44a5-9e69-0f6c67426f55-utilities\") pod \"redhat-operators-bkhz2\" (UID: \"36fbfba2-65ab-44a5-9e69-0f6c67426f55\") " pod="openshift-marketplace/redhat-operators-bkhz2" Nov 24 14:31:31 crc kubenswrapper[5039]: I1124 14:31:31.288095 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36fbfba2-65ab-44a5-9e69-0f6c67426f55-catalog-content\") pod \"redhat-operators-bkhz2\" (UID: \"36fbfba2-65ab-44a5-9e69-0f6c67426f55\") " pod="openshift-marketplace/redhat-operators-bkhz2" Nov 24 14:31:31 crc kubenswrapper[5039]: I1124 14:31:31.288325 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36fbfba2-65ab-44a5-9e69-0f6c67426f55-utilities\") pod \"redhat-operators-bkhz2\" (UID: \"36fbfba2-65ab-44a5-9e69-0f6c67426f55\") " pod="openshift-marketplace/redhat-operators-bkhz2" Nov 24 14:31:31 crc kubenswrapper[5039]: I1124 14:31:31.288460 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ld2pq\" (UniqueName: \"kubernetes.io/projected/36fbfba2-65ab-44a5-9e69-0f6c67426f55-kube-api-access-ld2pq\") pod \"redhat-operators-bkhz2\" (UID: \"36fbfba2-65ab-44a5-9e69-0f6c67426f55\") " pod="openshift-marketplace/redhat-operators-bkhz2" Nov 24 14:31:31 crc kubenswrapper[5039]: I1124 14:31:31.289215 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36fbfba2-65ab-44a5-9e69-0f6c67426f55-catalog-content\") pod \"redhat-operators-bkhz2\" (UID: \"36fbfba2-65ab-44a5-9e69-0f6c67426f55\") " pod="openshift-marketplace/redhat-operators-bkhz2" Nov 24 14:31:31 crc kubenswrapper[5039]: I1124 14:31:31.289446 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36fbfba2-65ab-44a5-9e69-0f6c67426f55-utilities\") pod \"redhat-operators-bkhz2\" (UID: \"36fbfba2-65ab-44a5-9e69-0f6c67426f55\") " pod="openshift-marketplace/redhat-operators-bkhz2" Nov 24 14:31:31 crc kubenswrapper[5039]: I1124 14:31:31.308146 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:31:31 crc kubenswrapper[5039]: E1124 14:31:31.308623 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:31:31 crc kubenswrapper[5039]: I1124 14:31:31.308968 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ld2pq\" (UniqueName: \"kubernetes.io/projected/36fbfba2-65ab-44a5-9e69-0f6c67426f55-kube-api-access-ld2pq\") pod \"redhat-operators-bkhz2\" (UID: \"36fbfba2-65ab-44a5-9e69-0f6c67426f55\") " pod="openshift-marketplace/redhat-operators-bkhz2" Nov 24 14:31:31 crc kubenswrapper[5039]: I1124 14:31:31.328248 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bkhz2" Nov 24 14:31:31 crc kubenswrapper[5039]: I1124 14:31:31.786215 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bkhz2"] Nov 24 14:31:32 crc kubenswrapper[5039]: I1124 14:31:32.795865 5039 generic.go:334] "Generic (PLEG): container finished" podID="36fbfba2-65ab-44a5-9e69-0f6c67426f55" containerID="8158fccd9d50203b5df0a7d91ddfbd5309b7e95702cf082abf51322ca6e3838a" exitCode=0 Nov 24 14:31:32 crc kubenswrapper[5039]: I1124 14:31:32.795966 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bkhz2" event={"ID":"36fbfba2-65ab-44a5-9e69-0f6c67426f55","Type":"ContainerDied","Data":"8158fccd9d50203b5df0a7d91ddfbd5309b7e95702cf082abf51322ca6e3838a"} Nov 24 14:31:32 crc kubenswrapper[5039]: I1124 14:31:32.796162 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bkhz2" event={"ID":"36fbfba2-65ab-44a5-9e69-0f6c67426f55","Type":"ContainerStarted","Data":"2672622a062a45357796a8ac800085e2110904fb61b0d54fa53aa27de1a7114b"} Nov 24 14:31:42 crc kubenswrapper[5039]: I1124 14:31:42.307672 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:31:42 crc kubenswrapper[5039]: E1124 14:31:42.308436 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:31:42 crc kubenswrapper[5039]: I1124 14:31:42.927799 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bkhz2" event={"ID":"36fbfba2-65ab-44a5-9e69-0f6c67426f55","Type":"ContainerStarted","Data":"689fddf430a91a8b9c3fe3362496764ae38facab3682aff54f9e0815fa6f94e0"} Nov 24 14:31:44 crc kubenswrapper[5039]: I1124 14:31:44.950477 5039 generic.go:334] "Generic (PLEG): container finished" podID="36fbfba2-65ab-44a5-9e69-0f6c67426f55" containerID="689fddf430a91a8b9c3fe3362496764ae38facab3682aff54f9e0815fa6f94e0" exitCode=0 Nov 24 14:31:44 crc kubenswrapper[5039]: I1124 14:31:44.950561 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bkhz2" event={"ID":"36fbfba2-65ab-44a5-9e69-0f6c67426f55","Type":"ContainerDied","Data":"689fddf430a91a8b9c3fe3362496764ae38facab3682aff54f9e0815fa6f94e0"} Nov 24 14:31:45 crc kubenswrapper[5039]: I1124 14:31:45.966438 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bkhz2" event={"ID":"36fbfba2-65ab-44a5-9e69-0f6c67426f55","Type":"ContainerStarted","Data":"b53f03accbc3f7bca15b7c3f06df3ddd2b3a75caa9d105c2dc07af4043ef260c"} Nov 24 14:31:46 crc kubenswrapper[5039]: I1124 14:31:46.003393 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bkhz2" podStartSLOduration=3.40680377 podStartE2EDuration="16.003371691s" podCreationTimestamp="2025-11-24 14:31:30 +0000 UTC" firstStartedPulling="2025-11-24 14:31:32.799070203 +0000 UTC m=+4405.238194703" lastFinishedPulling="2025-11-24 14:31:45.395638084 +0000 UTC m=+4417.834762624" observedRunningTime="2025-11-24 14:31:45.984844488 +0000 
UTC m=+4418.423969028" watchObservedRunningTime="2025-11-24 14:31:46.003371691 +0000 UTC m=+4418.442496201" Nov 24 14:31:51 crc kubenswrapper[5039]: I1124 14:31:51.328591 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bkhz2" Nov 24 14:31:51 crc kubenswrapper[5039]: I1124 14:31:51.329192 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bkhz2" Nov 24 14:31:51 crc kubenswrapper[5039]: I1124 14:31:51.377890 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bkhz2" Nov 24 14:31:52 crc kubenswrapper[5039]: I1124 14:31:52.107749 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bkhz2" Nov 24 14:31:52 crc kubenswrapper[5039]: I1124 14:31:52.200064 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bkhz2"] Nov 24 14:31:52 crc kubenswrapper[5039]: I1124 14:31:52.253084 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bnr64"] Nov 24 14:31:52 crc kubenswrapper[5039]: I1124 14:31:52.253319 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bnr64" podUID="c18cd2cc-a59b-4f16-bb59-f4536b737ef5" containerName="registry-server" containerID="cri-o://eae40f5f66686204735982d721f21361d3c56a41a6199b74e40a2887e2286189" gracePeriod=2 Nov 24 14:31:53 crc kubenswrapper[5039]: I1124 14:31:53.058625 5039 generic.go:334] "Generic (PLEG): container finished" podID="c18cd2cc-a59b-4f16-bb59-f4536b737ef5" containerID="eae40f5f66686204735982d721f21361d3c56a41a6199b74e40a2887e2286189" exitCode=0 Nov 24 14:31:53 crc kubenswrapper[5039]: I1124 14:31:53.058725 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bnr64" event={"ID":"c18cd2cc-a59b-4f16-bb59-f4536b737ef5","Type":"ContainerDied","Data":"eae40f5f66686204735982d721f21361d3c56a41a6199b74e40a2887e2286189"} Nov 24 14:31:53 crc kubenswrapper[5039]: I1124 14:31:53.443710 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bnr64" Nov 24 14:31:53 crc kubenswrapper[5039]: I1124 14:31:53.503542 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c18cd2cc-a59b-4f16-bb59-f4536b737ef5-utilities\") pod \"c18cd2cc-a59b-4f16-bb59-f4536b737ef5\" (UID: \"c18cd2cc-a59b-4f16-bb59-f4536b737ef5\") " Nov 24 14:31:53 crc kubenswrapper[5039]: I1124 14:31:53.503647 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-25l6h\" (UniqueName: \"kubernetes.io/projected/c18cd2cc-a59b-4f16-bb59-f4536b737ef5-kube-api-access-25l6h\") pod \"c18cd2cc-a59b-4f16-bb59-f4536b737ef5\" (UID: \"c18cd2cc-a59b-4f16-bb59-f4536b737ef5\") " Nov 24 14:31:53 crc kubenswrapper[5039]: I1124 14:31:53.503783 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c18cd2cc-a59b-4f16-bb59-f4536b737ef5-catalog-content\") pod \"c18cd2cc-a59b-4f16-bb59-f4536b737ef5\" (UID: \"c18cd2cc-a59b-4f16-bb59-f4536b737ef5\") " Nov 24 14:31:53 crc kubenswrapper[5039]: I1124 14:31:53.504953 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c18cd2cc-a59b-4f16-bb59-f4536b737ef5-utilities" (OuterVolumeSpecName: "utilities") pod "c18cd2cc-a59b-4f16-bb59-f4536b737ef5" (UID: "c18cd2cc-a59b-4f16-bb59-f4536b737ef5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:31:53 crc kubenswrapper[5039]: I1124 14:31:53.524243 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c18cd2cc-a59b-4f16-bb59-f4536b737ef5-kube-api-access-25l6h" (OuterVolumeSpecName: "kube-api-access-25l6h") pod "c18cd2cc-a59b-4f16-bb59-f4536b737ef5" (UID: "c18cd2cc-a59b-4f16-bb59-f4536b737ef5"). InnerVolumeSpecName "kube-api-access-25l6h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:31:53 crc kubenswrapper[5039]: I1124 14:31:53.612924 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c18cd2cc-a59b-4f16-bb59-f4536b737ef5-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 14:31:53 crc kubenswrapper[5039]: I1124 14:31:53.613892 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-25l6h\" (UniqueName: \"kubernetes.io/projected/c18cd2cc-a59b-4f16-bb59-f4536b737ef5-kube-api-access-25l6h\") on node \"crc\" DevicePath \"\"" Nov 24 14:31:53 crc kubenswrapper[5039]: I1124 14:31:53.617887 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c18cd2cc-a59b-4f16-bb59-f4536b737ef5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c18cd2cc-a59b-4f16-bb59-f4536b737ef5" (UID: "c18cd2cc-a59b-4f16-bb59-f4536b737ef5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:31:53 crc kubenswrapper[5039]: I1124 14:31:53.716407 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c18cd2cc-a59b-4f16-bb59-f4536b737ef5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 14:31:54 crc kubenswrapper[5039]: I1124 14:31:54.085940 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bnr64" Nov 24 14:31:54 crc kubenswrapper[5039]: I1124 14:31:54.085991 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bnr64" event={"ID":"c18cd2cc-a59b-4f16-bb59-f4536b737ef5","Type":"ContainerDied","Data":"b75bc4586505399ef330737af53c6bcbcd31a4b6b822ca543b627899bd23990b"} Nov 24 14:31:54 crc kubenswrapper[5039]: I1124 14:31:54.086714 5039 scope.go:117] "RemoveContainer" containerID="eae40f5f66686204735982d721f21361d3c56a41a6199b74e40a2887e2286189" Nov 24 14:31:54 crc kubenswrapper[5039]: I1124 14:31:54.114747 5039 scope.go:117] "RemoveContainer" containerID="bb370a2a572d2a169281009cb8e233e12c7a1c7bffc62eca0484635a10b5dd50" Nov 24 14:31:54 crc kubenswrapper[5039]: I1124 14:31:54.149473 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bnr64"] Nov 24 14:31:54 crc kubenswrapper[5039]: I1124 14:31:54.150365 5039 scope.go:117] "RemoveContainer" containerID="ebb2a38daef17c68ffad0ee219ac36094a67287d3e3f535accb451aef6aef142" Nov 24 14:31:54 crc kubenswrapper[5039]: I1124 14:31:54.172061 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bnr64"] Nov 24 14:31:54 crc kubenswrapper[5039]: I1124 14:31:54.318363 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c18cd2cc-a59b-4f16-bb59-f4536b737ef5" path="/var/lib/kubelet/pods/c18cd2cc-a59b-4f16-bb59-f4536b737ef5/volumes" Nov 24 14:31:56 crc kubenswrapper[5039]: I1124 14:31:56.306476 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:31:56 crc kubenswrapper[5039]: E1124 14:31:56.307026 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:32:10 crc kubenswrapper[5039]: I1124 14:32:10.307205 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:32:10 crc kubenswrapper[5039]: E1124 14:32:10.308034 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:32:23 crc kubenswrapper[5039]: I1124 14:32:23.307704 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:32:23 crc kubenswrapper[5039]: E1124 14:32:23.308890 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:32:38 
crc kubenswrapper[5039]: I1124 14:32:38.321430 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:32:38 crc kubenswrapper[5039]: E1124 14:32:38.322271 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:32:53 crc kubenswrapper[5039]: I1124 14:32:53.307264 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:32:53 crc kubenswrapper[5039]: E1124 14:32:53.308382 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:33:08 crc kubenswrapper[5039]: I1124 14:33:08.307345 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:33:08 crc kubenswrapper[5039]: E1124 14:33:08.308803 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:33:22 crc kubenswrapper[5039]: I1124 14:33:22.307524 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:33:22 crc kubenswrapper[5039]: E1124 14:33:22.308634 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:33:35 crc kubenswrapper[5039]: I1124 14:33:35.306341 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:33:35 crc kubenswrapper[5039]: E1124 14:33:35.307136 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:33:50 crc kubenswrapper[5039]: I1124 14:33:50.308643 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40" Nov 24 14:33:50 crc 
kubenswrapper[5039]: E1124 14:33:50.309698 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:34:02 crc kubenswrapper[5039]: I1124 14:34:02.307178 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40"
Nov 24 14:34:02 crc kubenswrapper[5039]: E1124 14:34:02.308241 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:34:13 crc kubenswrapper[5039]: I1124 14:34:13.308740 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40"
Nov 24 14:34:13 crc kubenswrapper[5039]: E1124 14:34:13.309524 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:34:24 crc kubenswrapper[5039]: I1124 14:34:24.307192 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40"
Nov 24 14:34:24 crc kubenswrapper[5039]: I1124 14:34:24.821171 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"08fa3c908c9993cba7053804a49fe91058545f3752e7c82b0d480696af9568a6"}
Nov 24 14:34:43 crc kubenswrapper[5039]: I1124 14:34:43.990459 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-d4qbg"]
Nov 24 14:34:43 crc kubenswrapper[5039]: E1124 14:34:43.991712 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c18cd2cc-a59b-4f16-bb59-f4536b737ef5" containerName="registry-server"
Nov 24 14:34:43 crc kubenswrapper[5039]: I1124 14:34:43.991730 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="c18cd2cc-a59b-4f16-bb59-f4536b737ef5" containerName="registry-server"
Nov 24 14:34:43 crc kubenswrapper[5039]: E1124 14:34:43.991747 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c18cd2cc-a59b-4f16-bb59-f4536b737ef5" containerName="extract-content"
Nov 24 14:34:43 crc kubenswrapper[5039]: I1124 14:34:43.991753 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="c18cd2cc-a59b-4f16-bb59-f4536b737ef5" containerName="extract-content"
Nov 24 14:34:43 crc kubenswrapper[5039]: E1124 14:34:43.991776 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c18cd2cc-a59b-4f16-bb59-f4536b737ef5" containerName="extract-utilities"
Nov 24 14:34:43 crc kubenswrapper[5039]: I1124 14:34:43.991781 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="c18cd2cc-a59b-4f16-bb59-f4536b737ef5" containerName="extract-utilities"
Nov 24 14:34:43 crc kubenswrapper[5039]: I1124 14:34:43.992020 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="c18cd2cc-a59b-4f16-bb59-f4536b737ef5" containerName="registry-server"
Nov 24 14:34:43 crc kubenswrapper[5039]: I1124 14:34:43.993784 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d4qbg"
Nov 24 14:34:44 crc kubenswrapper[5039]: I1124 14:34:44.013308 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d4qbg"]
Nov 24 14:34:44 crc kubenswrapper[5039]: I1124 14:34:44.075529 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13c417b1-2623-48ff-8b9e-5c3926650990-catalog-content\") pod \"community-operators-d4qbg\" (UID: \"13c417b1-2623-48ff-8b9e-5c3926650990\") " pod="openshift-marketplace/community-operators-d4qbg"
Nov 24 14:34:44 crc kubenswrapper[5039]: I1124 14:34:44.075597 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzw7w\" (UniqueName: \"kubernetes.io/projected/13c417b1-2623-48ff-8b9e-5c3926650990-kube-api-access-gzw7w\") pod \"community-operators-d4qbg\" (UID: \"13c417b1-2623-48ff-8b9e-5c3926650990\") " pod="openshift-marketplace/community-operators-d4qbg"
Nov 24 14:34:44 crc kubenswrapper[5039]: I1124 14:34:44.075743 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13c417b1-2623-48ff-8b9e-5c3926650990-utilities\") pod \"community-operators-d4qbg\" (UID: \"13c417b1-2623-48ff-8b9e-5c3926650990\") " pod="openshift-marketplace/community-operators-d4qbg"
Nov 24 14:34:44 crc kubenswrapper[5039]: I1124 14:34:44.177702 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13c417b1-2623-48ff-8b9e-5c3926650990-catalog-content\") pod \"community-operators-d4qbg\" (UID: \"13c417b1-2623-48ff-8b9e-5c3926650990\") " pod="openshift-marketplace/community-operators-d4qbg"
Nov 24 14:34:44 crc kubenswrapper[5039]: I1124 14:34:44.177777 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzw7w\" (UniqueName: \"kubernetes.io/projected/13c417b1-2623-48ff-8b9e-5c3926650990-kube-api-access-gzw7w\") pod \"community-operators-d4qbg\" (UID: \"13c417b1-2623-48ff-8b9e-5c3926650990\") " pod="openshift-marketplace/community-operators-d4qbg"
Nov 24 14:34:44 crc kubenswrapper[5039]: I1124 14:34:44.177923 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13c417b1-2623-48ff-8b9e-5c3926650990-utilities\") pod \"community-operators-d4qbg\" (UID: \"13c417b1-2623-48ff-8b9e-5c3926650990\") " pod="openshift-marketplace/community-operators-d4qbg"
Nov 24 14:34:44 crc kubenswrapper[5039]: I1124 14:34:44.178610 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13c417b1-2623-48ff-8b9e-5c3926650990-catalog-content\") pod \"community-operators-d4qbg\" (UID: \"13c417b1-2623-48ff-8b9e-5c3926650990\") " pod="openshift-marketplace/community-operators-d4qbg"
Nov 24 14:34:44 crc kubenswrapper[5039]: I1124 14:34:44.178934 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13c417b1-2623-48ff-8b9e-5c3926650990-utilities\") pod \"community-operators-d4qbg\" (UID: \"13c417b1-2623-48ff-8b9e-5c3926650990\") " pod="openshift-marketplace/community-operators-d4qbg"
Nov 24 14:34:44 crc kubenswrapper[5039]: I1124 14:34:44.212600 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzw7w\" (UniqueName: \"kubernetes.io/projected/13c417b1-2623-48ff-8b9e-5c3926650990-kube-api-access-gzw7w\") pod \"community-operators-d4qbg\" (UID: \"13c417b1-2623-48ff-8b9e-5c3926650990\") " pod="openshift-marketplace/community-operators-d4qbg"
Nov 24 14:34:44 crc kubenswrapper[5039]: I1124 14:34:44.333764 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d4qbg"
Nov 24 14:34:44 crc kubenswrapper[5039]: I1124 14:34:44.927447 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d4qbg"]
Nov 24 14:34:45 crc kubenswrapper[5039]: I1124 14:34:45.065952 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d4qbg" event={"ID":"13c417b1-2623-48ff-8b9e-5c3926650990","Type":"ContainerStarted","Data":"c0def22265c8a19eaecae11f20025839cbade7bf9743f12459d3bfbc6a586070"}
Nov 24 14:34:46 crc kubenswrapper[5039]: I1124 14:34:46.077915 5039 generic.go:334] "Generic (PLEG): container finished" podID="13c417b1-2623-48ff-8b9e-5c3926650990" containerID="911d0bb802c8d947219304eb3fd56dd3406872cffe3e9cf44ce86f8015519c3f" exitCode=0
Nov 24 14:34:46 crc kubenswrapper[5039]: I1124 14:34:46.077978 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d4qbg" event={"ID":"13c417b1-2623-48ff-8b9e-5c3926650990","Type":"ContainerDied","Data":"911d0bb802c8d947219304eb3fd56dd3406872cffe3e9cf44ce86f8015519c3f"}
Nov 24 14:34:47 crc kubenswrapper[5039]: I1124 14:34:47.089900 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d4qbg" event={"ID":"13c417b1-2623-48ff-8b9e-5c3926650990","Type":"ContainerStarted","Data":"597f158c5096d95e79dd913f5fd0daf1df4327d9940d9c0246faf3bfe78ffa99"}
Nov 24 14:34:49 crc kubenswrapper[5039]: I1124 14:34:49.110911 5039 generic.go:334] "Generic (PLEG): container finished" podID="13c417b1-2623-48ff-8b9e-5c3926650990" containerID="597f158c5096d95e79dd913f5fd0daf1df4327d9940d9c0246faf3bfe78ffa99" exitCode=0
Nov 24 14:34:49 crc kubenswrapper[5039]: I1124 14:34:49.111010 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d4qbg" event={"ID":"13c417b1-2623-48ff-8b9e-5c3926650990","Type":"ContainerDied","Data":"597f158c5096d95e79dd913f5fd0daf1df4327d9940d9c0246faf3bfe78ffa99"}
Nov 24 14:34:51 crc kubenswrapper[5039]: I1124 14:34:51.135418 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d4qbg" event={"ID":"13c417b1-2623-48ff-8b9e-5c3926650990","Type":"ContainerStarted","Data":"43977ab84b2b3016ff69e7d7d3a742938a082e3f4c31016fa13d62d2c934793a"}
Nov 24 14:34:51 crc kubenswrapper[5039]: I1124 14:34:51.161147 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-d4qbg" podStartSLOduration=4.262649774 podStartE2EDuration="8.161128724s" podCreationTimestamp="2025-11-24 14:34:43 +0000 UTC" firstStartedPulling="2025-11-24 14:34:46.080583687 +0000 UTC m=+4598.519708207" lastFinishedPulling="2025-11-24 14:34:49.979062637 +0000 UTC m=+4602.418187157" observedRunningTime="2025-11-24 14:34:51.152034001 +0000 UTC m=+4603.591158501" watchObservedRunningTime="2025-11-24 14:34:51.161128724 +0000 UTC m=+4603.600253224"
Nov 24 14:34:54 crc kubenswrapper[5039]: I1124 14:34:54.334028 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-d4qbg"
Nov 24 14:34:54 crc kubenswrapper[5039]: I1124 14:34:54.334738 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-d4qbg"
Nov 24 14:34:54 crc kubenswrapper[5039]: I1124 14:34:54.401811 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-d4qbg"
Nov 24 14:34:55 crc kubenswrapper[5039]: I1124 14:34:55.252010 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-d4qbg"
Nov 24 14:34:55 crc kubenswrapper[5039]: I1124 14:34:55.343384 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d4qbg"]
Nov 24 14:34:57 crc kubenswrapper[5039]: I1124 14:34:57.205771 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-d4qbg" podUID="13c417b1-2623-48ff-8b9e-5c3926650990" containerName="registry-server" containerID="cri-o://43977ab84b2b3016ff69e7d7d3a742938a082e3f4c31016fa13d62d2c934793a" gracePeriod=2
Nov 24 14:34:57 crc kubenswrapper[5039]: I1124 14:34:57.783357 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d4qbg"
Nov 24 14:34:57 crc kubenswrapper[5039]: I1124 14:34:57.916617 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzw7w\" (UniqueName: \"kubernetes.io/projected/13c417b1-2623-48ff-8b9e-5c3926650990-kube-api-access-gzw7w\") pod \"13c417b1-2623-48ff-8b9e-5c3926650990\" (UID: \"13c417b1-2623-48ff-8b9e-5c3926650990\") "
Nov 24 14:34:57 crc kubenswrapper[5039]: I1124 14:34:57.916671 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13c417b1-2623-48ff-8b9e-5c3926650990-catalog-content\") pod \"13c417b1-2623-48ff-8b9e-5c3926650990\" (UID: \"13c417b1-2623-48ff-8b9e-5c3926650990\") "
Nov 24 14:34:57 crc kubenswrapper[5039]: I1124 14:34:57.916793 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13c417b1-2623-48ff-8b9e-5c3926650990-utilities\") pod \"13c417b1-2623-48ff-8b9e-5c3926650990\" (UID: \"13c417b1-2623-48ff-8b9e-5c3926650990\") "
Nov 24 14:34:57 crc kubenswrapper[5039]: I1124 14:34:57.918033 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13c417b1-2623-48ff-8b9e-5c3926650990-utilities" (OuterVolumeSpecName: "utilities") pod "13c417b1-2623-48ff-8b9e-5c3926650990" (UID: "13c417b1-2623-48ff-8b9e-5c3926650990"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 14:34:57 crc kubenswrapper[5039]: I1124 14:34:57.965957 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13c417b1-2623-48ff-8b9e-5c3926650990-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "13c417b1-2623-48ff-8b9e-5c3926650990" (UID: "13c417b1-2623-48ff-8b9e-5c3926650990"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 14:34:58 crc kubenswrapper[5039]: I1124 14:34:58.019662 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13c417b1-2623-48ff-8b9e-5c3926650990-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 14:34:58 crc kubenswrapper[5039]: I1124 14:34:58.019698 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13c417b1-2623-48ff-8b9e-5c3926650990-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 14:34:58 crc kubenswrapper[5039]: I1124 14:34:58.218234 5039 generic.go:334] "Generic (PLEG): container finished" podID="13c417b1-2623-48ff-8b9e-5c3926650990" containerID="43977ab84b2b3016ff69e7d7d3a742938a082e3f4c31016fa13d62d2c934793a" exitCode=0
Nov 24 14:34:58 crc kubenswrapper[5039]: I1124 14:34:58.218273 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d4qbg" event={"ID":"13c417b1-2623-48ff-8b9e-5c3926650990","Type":"ContainerDied","Data":"43977ab84b2b3016ff69e7d7d3a742938a082e3f4c31016fa13d62d2c934793a"}
Nov 24 14:34:58 crc kubenswrapper[5039]: I1124 14:34:58.218322 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d4qbg" event={"ID":"13c417b1-2623-48ff-8b9e-5c3926650990","Type":"ContainerDied","Data":"c0def22265c8a19eaecae11f20025839cbade7bf9743f12459d3bfbc6a586070"}
Nov 24 14:34:58 crc kubenswrapper[5039]: I1124 14:34:58.218344 5039 scope.go:117] "RemoveContainer" containerID="43977ab84b2b3016ff69e7d7d3a742938a082e3f4c31016fa13d62d2c934793a"
Nov 24 14:34:58 crc kubenswrapper[5039]: I1124 14:34:58.218357 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d4qbg"
Nov 24 14:34:58 crc kubenswrapper[5039]: I1124 14:34:58.244841 5039 scope.go:117] "RemoveContainer" containerID="597f158c5096d95e79dd913f5fd0daf1df4327d9940d9c0246faf3bfe78ffa99"
Nov 24 14:34:58 crc kubenswrapper[5039]: I1124 14:34:58.779104 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13c417b1-2623-48ff-8b9e-5c3926650990-kube-api-access-gzw7w" (OuterVolumeSpecName: "kube-api-access-gzw7w") pod "13c417b1-2623-48ff-8b9e-5c3926650990" (UID: "13c417b1-2623-48ff-8b9e-5c3926650990"). InnerVolumeSpecName "kube-api-access-gzw7w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 14:34:58 crc kubenswrapper[5039]: I1124 14:34:58.804942 5039 scope.go:117] "RemoveContainer" containerID="911d0bb802c8d947219304eb3fd56dd3406872cffe3e9cf44ce86f8015519c3f"
Nov 24 14:34:58 crc kubenswrapper[5039]: I1124 14:34:58.840372 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzw7w\" (UniqueName: \"kubernetes.io/projected/13c417b1-2623-48ff-8b9e-5c3926650990-kube-api-access-gzw7w\") on node \"crc\" DevicePath \"\""
Nov 24 14:34:58 crc kubenswrapper[5039]: I1124 14:34:58.930512 5039 scope.go:117] "RemoveContainer" containerID="43977ab84b2b3016ff69e7d7d3a742938a082e3f4c31016fa13d62d2c934793a"
Nov 24 14:34:58 crc kubenswrapper[5039]: E1124 14:34:58.930915 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"43977ab84b2b3016ff69e7d7d3a742938a082e3f4c31016fa13d62d2c934793a\": container with ID starting with 43977ab84b2b3016ff69e7d7d3a742938a082e3f4c31016fa13d62d2c934793a not found: ID does not exist" containerID="43977ab84b2b3016ff69e7d7d3a742938a082e3f4c31016fa13d62d2c934793a"
Nov 24 14:34:58 crc kubenswrapper[5039]: I1124 14:34:58.931003 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"43977ab84b2b3016ff69e7d7d3a742938a082e3f4c31016fa13d62d2c934793a"} err="failed to get container status \"43977ab84b2b3016ff69e7d7d3a742938a082e3f4c31016fa13d62d2c934793a\": rpc error: code = NotFound desc = could not find container \"43977ab84b2b3016ff69e7d7d3a742938a082e3f4c31016fa13d62d2c934793a\": container with ID starting with 43977ab84b2b3016ff69e7d7d3a742938a082e3f4c31016fa13d62d2c934793a not found: ID does not exist"
Nov 24 14:34:58 crc kubenswrapper[5039]: I1124 14:34:58.931141 5039 scope.go:117] "RemoveContainer" containerID="597f158c5096d95e79dd913f5fd0daf1df4327d9940d9c0246faf3bfe78ffa99"
Nov 24 14:34:58 crc kubenswrapper[5039]: E1124 14:34:58.931558 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"597f158c5096d95e79dd913f5fd0daf1df4327d9940d9c0246faf3bfe78ffa99\": container with ID starting with 597f158c5096d95e79dd913f5fd0daf1df4327d9940d9c0246faf3bfe78ffa99 not found: ID does not exist" containerID="597f158c5096d95e79dd913f5fd0daf1df4327d9940d9c0246faf3bfe78ffa99"
Nov 24 14:34:58 crc kubenswrapper[5039]: I1124 14:34:58.931635 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"597f158c5096d95e79dd913f5fd0daf1df4327d9940d9c0246faf3bfe78ffa99"} err="failed to get container status \"597f158c5096d95e79dd913f5fd0daf1df4327d9940d9c0246faf3bfe78ffa99\": rpc error: code = NotFound desc = could not find container \"597f158c5096d95e79dd913f5fd0daf1df4327d9940d9c0246faf3bfe78ffa99\": container with ID starting with 597f158c5096d95e79dd913f5fd0daf1df4327d9940d9c0246faf3bfe78ffa99 not found: ID does not exist"
Nov 24 14:34:58 crc kubenswrapper[5039]: I1124 14:34:58.931706 5039 scope.go:117] "RemoveContainer" containerID="911d0bb802c8d947219304eb3fd56dd3406872cffe3e9cf44ce86f8015519c3f"
Nov 24 14:34:58 crc kubenswrapper[5039]: E1124 14:34:58.932032 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"911d0bb802c8d947219304eb3fd56dd3406872cffe3e9cf44ce86f8015519c3f\": container with ID starting with 911d0bb802c8d947219304eb3fd56dd3406872cffe3e9cf44ce86f8015519c3f not found: ID does not exist" containerID="911d0bb802c8d947219304eb3fd56dd3406872cffe3e9cf44ce86f8015519c3f"
exist" containerID="911d0bb802c8d947219304eb3fd56dd3406872cffe3e9cf44ce86f8015519c3f" Nov 24 14:34:58 crc kubenswrapper[5039]: I1124 14:34:58.932108 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"911d0bb802c8d947219304eb3fd56dd3406872cffe3e9cf44ce86f8015519c3f"} err="failed to get container status \"911d0bb802c8d947219304eb3fd56dd3406872cffe3e9cf44ce86f8015519c3f\": rpc error: code = NotFound desc = could not find container \"911d0bb802c8d947219304eb3fd56dd3406872cffe3e9cf44ce86f8015519c3f\": container with ID starting with 911d0bb802c8d947219304eb3fd56dd3406872cffe3e9cf44ce86f8015519c3f not found: ID does not exist" Nov 24 14:34:58 crc kubenswrapper[5039]: I1124 14:34:58.988999 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d4qbg"] Nov 24 14:34:59 crc kubenswrapper[5039]: I1124 14:34:59.003366 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-d4qbg"] Nov 24 14:35:00 crc kubenswrapper[5039]: I1124 14:35:00.327141 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13c417b1-2623-48ff-8b9e-5c3926650990" path="/var/lib/kubelet/pods/13c417b1-2623-48ff-8b9e-5c3926650990/volumes" Nov 24 14:35:27 crc kubenswrapper[5039]: I1124 14:35:27.395025 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2hhl7"] Nov 24 14:35:27 crc kubenswrapper[5039]: E1124 14:35:27.396234 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13c417b1-2623-48ff-8b9e-5c3926650990" containerName="registry-server" Nov 24 14:35:27 crc kubenswrapper[5039]: I1124 14:35:27.396251 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="13c417b1-2623-48ff-8b9e-5c3926650990" containerName="registry-server" Nov 24 14:35:27 crc kubenswrapper[5039]: E1124 14:35:27.396276 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13c417b1-2623-48ff-8b9e-5c3926650990" containerName="extract-utilities" Nov 24 14:35:27 crc kubenswrapper[5039]: I1124 14:35:27.396284 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="13c417b1-2623-48ff-8b9e-5c3926650990" containerName="extract-utilities" Nov 24 14:35:27 crc kubenswrapper[5039]: E1124 14:35:27.396305 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13c417b1-2623-48ff-8b9e-5c3926650990" containerName="extract-content" Nov 24 14:35:27 crc kubenswrapper[5039]: I1124 14:35:27.396313 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="13c417b1-2623-48ff-8b9e-5c3926650990" containerName="extract-content" Nov 24 14:35:27 crc kubenswrapper[5039]: I1124 14:35:27.396574 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="13c417b1-2623-48ff-8b9e-5c3926650990" containerName="registry-server" Nov 24 14:35:27 crc kubenswrapper[5039]: I1124 14:35:27.398604 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2hhl7" Nov 24 14:35:27 crc kubenswrapper[5039]: I1124 14:35:27.413668 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2hhl7"] Nov 24 14:35:27 crc kubenswrapper[5039]: I1124 14:35:27.510784 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e-catalog-content\") pod \"redhat-marketplace-2hhl7\" (UID: \"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e\") " pod="openshift-marketplace/redhat-marketplace-2hhl7" Nov 24 14:35:27 crc kubenswrapper[5039]: I1124 14:35:27.511221 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e-utilities\") pod \"redhat-marketplace-2hhl7\" (UID: \"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e\") " pod="openshift-marketplace/redhat-marketplace-2hhl7" Nov 24 14:35:27 crc kubenswrapper[5039]: I1124 14:35:27.511299 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r968g\" (UniqueName: \"kubernetes.io/projected/31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e-kube-api-access-r968g\") pod \"redhat-marketplace-2hhl7\" (UID: \"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e\") " pod="openshift-marketplace/redhat-marketplace-2hhl7" Nov 24 14:35:27 crc kubenswrapper[5039]: I1124 14:35:27.614575 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e-catalog-content\") pod \"redhat-marketplace-2hhl7\" (UID: \"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e\") " pod="openshift-marketplace/redhat-marketplace-2hhl7" Nov 24 14:35:27 crc kubenswrapper[5039]: I1124 14:35:27.614652 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e-utilities\") pod \"redhat-marketplace-2hhl7\" (UID: \"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e\") " pod="openshift-marketplace/redhat-marketplace-2hhl7" Nov 24 14:35:27 crc kubenswrapper[5039]: I1124 14:35:27.614701 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r968g\" (UniqueName: \"kubernetes.io/projected/31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e-kube-api-access-r968g\") pod \"redhat-marketplace-2hhl7\" (UID: \"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e\") " pod="openshift-marketplace/redhat-marketplace-2hhl7" Nov 24 14:35:27 crc kubenswrapper[5039]: I1124 14:35:27.615065 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e-catalog-content\") pod \"redhat-marketplace-2hhl7\" (UID: \"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e\") " pod="openshift-marketplace/redhat-marketplace-2hhl7" Nov 24 14:35:27 crc kubenswrapper[5039]: I1124 14:35:27.615152 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e-utilities\") pod \"redhat-marketplace-2hhl7\" (UID: \"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e\") " pod="openshift-marketplace/redhat-marketplace-2hhl7" Nov 24 14:35:27 crc kubenswrapper[5039]: I1124 14:35:27.632318 5039 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-r968g\" (UniqueName: \"kubernetes.io/projected/31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e-kube-api-access-r968g\") pod \"redhat-marketplace-2hhl7\" (UID: \"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e\") " pod="openshift-marketplace/redhat-marketplace-2hhl7" Nov 24 14:35:27 crc kubenswrapper[5039]: I1124 14:35:27.720090 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2hhl7" Nov 24 14:35:28 crc kubenswrapper[5039]: I1124 14:35:28.286828 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2hhl7"] Nov 24 14:35:28 crc kubenswrapper[5039]: I1124 14:35:28.616633 5039 generic.go:334] "Generic (PLEG): container finished" podID="31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e" containerID="9b62ad96201db79b58f11b83c777b4c53d72febe36961b2308f18930b6d6db39" exitCode=0 Nov 24 14:35:28 crc kubenswrapper[5039]: I1124 14:35:28.616737 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2hhl7" event={"ID":"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e","Type":"ContainerDied","Data":"9b62ad96201db79b58f11b83c777b4c53d72febe36961b2308f18930b6d6db39"} Nov 24 14:35:28 crc kubenswrapper[5039]: I1124 14:35:28.616974 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2hhl7" event={"ID":"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e","Type":"ContainerStarted","Data":"d48e5ca6645300150f7f932e2a06d687abfd17ae3c052ec4ece08dc78153aa2e"} Nov 24 14:35:28 crc kubenswrapper[5039]: I1124 14:35:28.620495 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 14:35:30 crc kubenswrapper[5039]: I1124 14:35:30.643981 5039 generic.go:334] "Generic (PLEG): container finished" podID="31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e" containerID="87d78915474bce53c57e53668f15ed2934b155d78db03987c26e6d2defd54f04" exitCode=0 Nov 24 14:35:30 crc kubenswrapper[5039]: I1124 14:35:30.644218 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2hhl7" event={"ID":"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e","Type":"ContainerDied","Data":"87d78915474bce53c57e53668f15ed2934b155d78db03987c26e6d2defd54f04"} Nov 24 14:35:31 crc kubenswrapper[5039]: I1124 14:35:31.657224 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2hhl7" event={"ID":"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e","Type":"ContainerStarted","Data":"8df4ec8823b786a3fe6790b7202848395685d3cefef49b5f8db172c8685a649f"} Nov 24 14:35:31 crc kubenswrapper[5039]: I1124 14:35:31.688093 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2hhl7" podStartSLOduration=2.183468265 podStartE2EDuration="4.688063452s" podCreationTimestamp="2025-11-24 14:35:27 +0000 UTC" firstStartedPulling="2025-11-24 14:35:28.619884607 +0000 UTC m=+4641.059009107" lastFinishedPulling="2025-11-24 14:35:31.124479794 +0000 UTC m=+4643.563604294" observedRunningTime="2025-11-24 14:35:31.676455067 +0000 UTC m=+4644.115579577" watchObservedRunningTime="2025-11-24 14:35:31.688063452 +0000 UTC m=+4644.127187992" Nov 24 14:35:37 crc kubenswrapper[5039]: I1124 14:35:37.719638 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2hhl7" Nov 24 14:35:37 crc kubenswrapper[5039]: I1124 14:35:37.720425 5039 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2hhl7" Nov 24 14:35:38 crc kubenswrapper[5039]: I1124 14:35:38.217227 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2hhl7" Nov 24 14:35:38 crc kubenswrapper[5039]: I1124 14:35:38.804986 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2hhl7" Nov 24 14:35:38 crc kubenswrapper[5039]: I1124 14:35:38.851866 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2hhl7"] Nov 24 14:35:40 crc kubenswrapper[5039]: I1124 14:35:40.771963 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2hhl7" podUID="31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e" containerName="registry-server" containerID="cri-o://8df4ec8823b786a3fe6790b7202848395685d3cefef49b5f8db172c8685a649f" gracePeriod=2 Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.316961 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2hhl7" Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.447594 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r968g\" (UniqueName: \"kubernetes.io/projected/31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e-kube-api-access-r968g\") pod \"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e\" (UID: \"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e\") " Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.447731 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e-catalog-content\") pod \"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e\" (UID: \"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e\") " Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.448041 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e-utilities\") pod \"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e\" (UID: \"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e\") " Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.450634 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e-utilities" (OuterVolumeSpecName: "utilities") pod "31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e" (UID: "31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.454197 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e-kube-api-access-r968g" (OuterVolumeSpecName: "kube-api-access-r968g") pod "31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e" (UID: "31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e"). InnerVolumeSpecName "kube-api-access-r968g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.464591 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e" (UID: "31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.551527 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.551568 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r968g\" (UniqueName: \"kubernetes.io/projected/31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e-kube-api-access-r968g\") on node \"crc\" DevicePath \"\"" Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.551578 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.787415 5039 generic.go:334] "Generic (PLEG): container finished" podID="31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e" containerID="8df4ec8823b786a3fe6790b7202848395685d3cefef49b5f8db172c8685a649f" exitCode=0 Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.787469 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2hhl7" event={"ID":"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e","Type":"ContainerDied","Data":"8df4ec8823b786a3fe6790b7202848395685d3cefef49b5f8db172c8685a649f"} Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.787523 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2hhl7" event={"ID":"31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e","Type":"ContainerDied","Data":"d48e5ca6645300150f7f932e2a06d687abfd17ae3c052ec4ece08dc78153aa2e"} Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.787540 5039 scope.go:117] "RemoveContainer" containerID="8df4ec8823b786a3fe6790b7202848395685d3cefef49b5f8db172c8685a649f" Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.787553 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2hhl7" Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.813858 5039 scope.go:117] "RemoveContainer" containerID="87d78915474bce53c57e53668f15ed2934b155d78db03987c26e6d2defd54f04" Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.838781 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2hhl7"] Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.849901 5039 scope.go:117] "RemoveContainer" containerID="9b62ad96201db79b58f11b83c777b4c53d72febe36961b2308f18930b6d6db39" Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.850876 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2hhl7"] Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.895422 5039 scope.go:117] "RemoveContainer" containerID="8df4ec8823b786a3fe6790b7202848395685d3cefef49b5f8db172c8685a649f" Nov 24 14:35:41 crc kubenswrapper[5039]: E1124 14:35:41.895932 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8df4ec8823b786a3fe6790b7202848395685d3cefef49b5f8db172c8685a649f\": container with ID starting with 8df4ec8823b786a3fe6790b7202848395685d3cefef49b5f8db172c8685a649f not found: ID does not exist" containerID="8df4ec8823b786a3fe6790b7202848395685d3cefef49b5f8db172c8685a649f" Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.895966 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8df4ec8823b786a3fe6790b7202848395685d3cefef49b5f8db172c8685a649f"} err="failed to get container status \"8df4ec8823b786a3fe6790b7202848395685d3cefef49b5f8db172c8685a649f\": rpc error: code = NotFound desc = could not find container \"8df4ec8823b786a3fe6790b7202848395685d3cefef49b5f8db172c8685a649f\": container with ID starting with 8df4ec8823b786a3fe6790b7202848395685d3cefef49b5f8db172c8685a649f not found: ID does not exist" Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.895998 5039 scope.go:117] "RemoveContainer" containerID="87d78915474bce53c57e53668f15ed2934b155d78db03987c26e6d2defd54f04" Nov 24 14:35:41 crc kubenswrapper[5039]: E1124 14:35:41.896410 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87d78915474bce53c57e53668f15ed2934b155d78db03987c26e6d2defd54f04\": container with ID starting with 87d78915474bce53c57e53668f15ed2934b155d78db03987c26e6d2defd54f04 not found: ID does not exist" containerID="87d78915474bce53c57e53668f15ed2934b155d78db03987c26e6d2defd54f04" Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.896439 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87d78915474bce53c57e53668f15ed2934b155d78db03987c26e6d2defd54f04"} err="failed to get container status \"87d78915474bce53c57e53668f15ed2934b155d78db03987c26e6d2defd54f04\": rpc error: code = NotFound desc = could not find container \"87d78915474bce53c57e53668f15ed2934b155d78db03987c26e6d2defd54f04\": container with ID starting with 87d78915474bce53c57e53668f15ed2934b155d78db03987c26e6d2defd54f04 not found: ID does not exist" Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.896460 5039 scope.go:117] "RemoveContainer" containerID="9b62ad96201db79b58f11b83c777b4c53d72febe36961b2308f18930b6d6db39" Nov 24 14:35:41 crc kubenswrapper[5039]: E1124 14:35:41.897094 5039 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"9b62ad96201db79b58f11b83c777b4c53d72febe36961b2308f18930b6d6db39\": container with ID starting with 9b62ad96201db79b58f11b83c777b4c53d72febe36961b2308f18930b6d6db39 not found: ID does not exist" containerID="9b62ad96201db79b58f11b83c777b4c53d72febe36961b2308f18930b6d6db39" Nov 24 14:35:41 crc kubenswrapper[5039]: I1124 14:35:41.897124 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b62ad96201db79b58f11b83c777b4c53d72febe36961b2308f18930b6d6db39"} err="failed to get container status \"9b62ad96201db79b58f11b83c777b4c53d72febe36961b2308f18930b6d6db39\": rpc error: code = NotFound desc = could not find container \"9b62ad96201db79b58f11b83c777b4c53d72febe36961b2308f18930b6d6db39\": container with ID starting with 9b62ad96201db79b58f11b83c777b4c53d72febe36961b2308f18930b6d6db39 not found: ID does not exist" Nov 24 14:35:42 crc kubenswrapper[5039]: I1124 14:35:42.320762 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e" path="/var/lib/kubelet/pods/31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e/volumes" Nov 24 14:35:54 crc kubenswrapper[5039]: I1124 14:35:54.673082 5039 scope.go:117] "RemoveContainer" containerID="9894b19bd17da2eda60b53363718f20a91bc4721b85a456121e60cfe5a83ae30" Nov 24 14:35:54 crc kubenswrapper[5039]: I1124 14:35:54.697341 5039 scope.go:117] "RemoveContainer" containerID="5eccbedbb9e1904cad4b8d08630f3b76f1e76da361f1438c122667fe704b183f" Nov 24 14:35:54 crc kubenswrapper[5039]: I1124 14:35:54.730564 5039 scope.go:117] "RemoveContainer" containerID="0e115499a4efe752aa8e738bb04ce1bce19142c2afb33be130d47974ffcf4b6b" Nov 24 14:36:50 crc kubenswrapper[5039]: I1124 14:36:50.101768 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:36:50 crc kubenswrapper[5039]: I1124 14:36:50.102375 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 14:36:54 crc kubenswrapper[5039]: I1124 14:36:54.815698 5039 scope.go:117] "RemoveContainer" containerID="6166b96488451436dcc3cdb61d575d0c3dff77f100ae949a59254bf6a559ba52" Nov 24 14:37:01 crc kubenswrapper[5039]: I1124 14:37:01.814726 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zggqn"] Nov 24 14:37:01 crc kubenswrapper[5039]: E1124 14:37:01.816102 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e" containerName="registry-server" Nov 24 14:37:01 crc kubenswrapper[5039]: I1124 14:37:01.816125 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e" containerName="registry-server" Nov 24 14:37:01 crc kubenswrapper[5039]: E1124 14:37:01.816145 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e" containerName="extract-content" Nov 24 14:37:01 crc kubenswrapper[5039]: I1124 14:37:01.816156 5039 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e" containerName="extract-content" Nov 24 14:37:01 crc kubenswrapper[5039]: E1124 14:37:01.816171 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e" containerName="extract-utilities" Nov 24 14:37:01 crc kubenswrapper[5039]: I1124 14:37:01.816183 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e" containerName="extract-utilities" Nov 24 14:37:01 crc kubenswrapper[5039]: I1124 14:37:01.816658 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="31978f4b-f6a2-4ca0-886d-1e4a2cf19d6e" containerName="registry-server" Nov 24 14:37:01 crc kubenswrapper[5039]: I1124 14:37:01.819461 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zggqn" Nov 24 14:37:01 crc kubenswrapper[5039]: I1124 14:37:01.843990 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zggqn"] Nov 24 14:37:01 crc kubenswrapper[5039]: I1124 14:37:01.981927 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711-utilities\") pod \"certified-operators-zggqn\" (UID: \"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711\") " pod="openshift-marketplace/certified-operators-zggqn" Nov 24 14:37:01 crc kubenswrapper[5039]: I1124 14:37:01.982120 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711-catalog-content\") pod \"certified-operators-zggqn\" (UID: \"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711\") " pod="openshift-marketplace/certified-operators-zggqn" Nov 24 14:37:01 crc kubenswrapper[5039]: I1124 14:37:01.982175 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-229fg\" (UniqueName: \"kubernetes.io/projected/e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711-kube-api-access-229fg\") pod \"certified-operators-zggqn\" (UID: \"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711\") " pod="openshift-marketplace/certified-operators-zggqn" Nov 24 14:37:02 crc kubenswrapper[5039]: I1124 14:37:02.084632 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711-utilities\") pod \"certified-operators-zggqn\" (UID: \"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711\") " pod="openshift-marketplace/certified-operators-zggqn" Nov 24 14:37:02 crc kubenswrapper[5039]: I1124 14:37:02.084737 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711-catalog-content\") pod \"certified-operators-zggqn\" (UID: \"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711\") " pod="openshift-marketplace/certified-operators-zggqn" Nov 24 14:37:02 crc kubenswrapper[5039]: I1124 14:37:02.084774 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-229fg\" (UniqueName: \"kubernetes.io/projected/e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711-kube-api-access-229fg\") pod \"certified-operators-zggqn\" (UID: \"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711\") " pod="openshift-marketplace/certified-operators-zggqn" Nov 24 14:37:02 crc kubenswrapper[5039]: I1124 14:37:02.085080 5039 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711-utilities\") pod \"certified-operators-zggqn\" (UID: \"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711\") " pod="openshift-marketplace/certified-operators-zggqn" Nov 24 14:37:02 crc kubenswrapper[5039]: I1124 14:37:02.085211 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711-catalog-content\") pod \"certified-operators-zggqn\" (UID: \"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711\") " pod="openshift-marketplace/certified-operators-zggqn" Nov 24 14:37:02 crc kubenswrapper[5039]: I1124 14:37:02.103470 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-229fg\" (UniqueName: \"kubernetes.io/projected/e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711-kube-api-access-229fg\") pod \"certified-operators-zggqn\" (UID: \"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711\") " pod="openshift-marketplace/certified-operators-zggqn" Nov 24 14:37:02 crc kubenswrapper[5039]: I1124 14:37:02.159057 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zggqn" Nov 24 14:37:03 crc kubenswrapper[5039]: I1124 14:37:02.697358 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zggqn"] Nov 24 14:37:03 crc kubenswrapper[5039]: I1124 14:37:02.822751 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zggqn" event={"ID":"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711","Type":"ContainerStarted","Data":"31f29bb741eda9de838d2064401dee116044f4145e7b8704a1f1ff6631b8a53c"} Nov 24 14:37:03 crc kubenswrapper[5039]: I1124 14:37:03.834550 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zggqn" event={"ID":"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711","Type":"ContainerStarted","Data":"9c5b3fce2f98c2fed9217beb6c73ed331309cd6ac9fbe751a4c0d03dc472fe07"} Nov 24 14:37:04 crc kubenswrapper[5039]: I1124 14:37:04.873185 5039 generic.go:334] "Generic (PLEG): container finished" podID="e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711" containerID="9c5b3fce2f98c2fed9217beb6c73ed331309cd6ac9fbe751a4c0d03dc472fe07" exitCode=0 Nov 24 14:37:04 crc kubenswrapper[5039]: I1124 14:37:04.873285 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zggqn" event={"ID":"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711","Type":"ContainerDied","Data":"9c5b3fce2f98c2fed9217beb6c73ed331309cd6ac9fbe751a4c0d03dc472fe07"} Nov 24 14:37:06 crc kubenswrapper[5039]: I1124 14:37:06.896093 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zggqn" event={"ID":"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711","Type":"ContainerStarted","Data":"3f791c077f37866b13efe314a1386e9ff6240c72d254d3218cffe5edd6bed8d3"} Nov 24 14:37:07 crc kubenswrapper[5039]: I1124 14:37:07.910787 5039 generic.go:334] "Generic (PLEG): container finished" podID="e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711" containerID="3f791c077f37866b13efe314a1386e9ff6240c72d254d3218cffe5edd6bed8d3" exitCode=0 Nov 24 14:37:07 crc kubenswrapper[5039]: I1124 14:37:07.910917 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zggqn" 
event={"ID":"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711","Type":"ContainerDied","Data":"3f791c077f37866b13efe314a1386e9ff6240c72d254d3218cffe5edd6bed8d3"} Nov 24 14:37:08 crc kubenswrapper[5039]: I1124 14:37:08.925101 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zggqn" event={"ID":"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711","Type":"ContainerStarted","Data":"10d7351c34786016dbb86a9243c588ba16cdb9c1255570f6195f773e42b76912"} Nov 24 14:37:08 crc kubenswrapper[5039]: I1124 14:37:08.952681 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zggqn" podStartSLOduration=4.504592419 podStartE2EDuration="7.952663698s" podCreationTimestamp="2025-11-24 14:37:01 +0000 UTC" firstStartedPulling="2025-11-24 14:37:04.879018486 +0000 UTC m=+4737.318142986" lastFinishedPulling="2025-11-24 14:37:08.327089775 +0000 UTC m=+4740.766214265" observedRunningTime="2025-11-24 14:37:08.95067544 +0000 UTC m=+4741.389799940" watchObservedRunningTime="2025-11-24 14:37:08.952663698 +0000 UTC m=+4741.391788198" Nov 24 14:37:12 crc kubenswrapper[5039]: I1124 14:37:12.159923 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zggqn" Nov 24 14:37:12 crc kubenswrapper[5039]: I1124 14:37:12.160712 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zggqn" Nov 24 14:37:12 crc kubenswrapper[5039]: I1124 14:37:12.203191 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zggqn" Nov 24 14:37:20 crc kubenswrapper[5039]: I1124 14:37:20.102859 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:37:20 crc kubenswrapper[5039]: I1124 14:37:20.103367 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 14:37:22 crc kubenswrapper[5039]: I1124 14:37:22.208057 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zggqn" Nov 24 14:37:22 crc kubenswrapper[5039]: I1124 14:37:22.265007 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zggqn"] Nov 24 14:37:23 crc kubenswrapper[5039]: I1124 14:37:23.070431 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zggqn" podUID="e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711" containerName="registry-server" containerID="cri-o://10d7351c34786016dbb86a9243c588ba16cdb9c1255570f6195f773e42b76912" gracePeriod=2 Nov 24 14:37:23 crc kubenswrapper[5039]: I1124 14:37:23.668822 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zggqn" Nov 24 14:37:23 crc kubenswrapper[5039]: I1124 14:37:23.850190 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711-utilities\") pod \"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711\" (UID: \"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711\") " Nov 24 14:37:23 crc kubenswrapper[5039]: I1124 14:37:23.850565 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-229fg\" (UniqueName: \"kubernetes.io/projected/e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711-kube-api-access-229fg\") pod \"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711\" (UID: \"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711\") " Nov 24 14:37:23 crc kubenswrapper[5039]: I1124 14:37:23.850661 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711-catalog-content\") pod \"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711\" (UID: \"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711\") " Nov 24 14:37:23 crc kubenswrapper[5039]: I1124 14:37:23.851835 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711-utilities" (OuterVolumeSpecName: "utilities") pod "e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711" (UID: "e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:37:23 crc kubenswrapper[5039]: I1124 14:37:23.856865 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711-kube-api-access-229fg" (OuterVolumeSpecName: "kube-api-access-229fg") pod "e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711" (UID: "e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711"). InnerVolumeSpecName "kube-api-access-229fg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:37:23 crc kubenswrapper[5039]: I1124 14:37:23.912833 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711" (UID: "e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:37:23 crc kubenswrapper[5039]: I1124 14:37:23.953432 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 14:37:23 crc kubenswrapper[5039]: I1124 14:37:23.953756 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-229fg\" (UniqueName: \"kubernetes.io/projected/e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711-kube-api-access-229fg\") on node \"crc\" DevicePath \"\"" Nov 24 14:37:23 crc kubenswrapper[5039]: I1124 14:37:23.953906 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 14:37:24 crc kubenswrapper[5039]: I1124 14:37:24.095694 5039 generic.go:334] "Generic (PLEG): container finished" podID="e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711" containerID="10d7351c34786016dbb86a9243c588ba16cdb9c1255570f6195f773e42b76912" exitCode=0 Nov 24 14:37:24 crc kubenswrapper[5039]: I1124 14:37:24.095740 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zggqn" event={"ID":"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711","Type":"ContainerDied","Data":"10d7351c34786016dbb86a9243c588ba16cdb9c1255570f6195f773e42b76912"} Nov 24 14:37:24 crc kubenswrapper[5039]: I1124 14:37:24.095769 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zggqn" event={"ID":"e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711","Type":"ContainerDied","Data":"31f29bb741eda9de838d2064401dee116044f4145e7b8704a1f1ff6631b8a53c"} Nov 24 14:37:24 crc kubenswrapper[5039]: I1124 14:37:24.095776 5039 util.go:48] "No ready sandbox for pod can be found. 
Nov 24 14:37:24 crc kubenswrapper[5039]: I1124 14:37:24.095805 5039 scope.go:117] "RemoveContainer" containerID="10d7351c34786016dbb86a9243c588ba16cdb9c1255570f6195f773e42b76912"
Nov 24 14:37:24 crc kubenswrapper[5039]: I1124 14:37:24.125325 5039 scope.go:117] "RemoveContainer" containerID="3f791c077f37866b13efe314a1386e9ff6240c72d254d3218cffe5edd6bed8d3"
Nov 24 14:37:24 crc kubenswrapper[5039]: I1124 14:37:24.138556 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zggqn"]
Nov 24 14:37:24 crc kubenswrapper[5039]: I1124 14:37:24.151762 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zggqn"]
Nov 24 14:37:24 crc kubenswrapper[5039]: I1124 14:37:24.157793 5039 scope.go:117] "RemoveContainer" containerID="9c5b3fce2f98c2fed9217beb6c73ed331309cd6ac9fbe751a4c0d03dc472fe07"
Nov 24 14:37:24 crc kubenswrapper[5039]: I1124 14:37:24.222830 5039 scope.go:117] "RemoveContainer" containerID="10d7351c34786016dbb86a9243c588ba16cdb9c1255570f6195f773e42b76912"
Nov 24 14:37:24 crc kubenswrapper[5039]: E1124 14:37:24.223325 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10d7351c34786016dbb86a9243c588ba16cdb9c1255570f6195f773e42b76912\": container with ID starting with 10d7351c34786016dbb86a9243c588ba16cdb9c1255570f6195f773e42b76912 not found: ID does not exist" containerID="10d7351c34786016dbb86a9243c588ba16cdb9c1255570f6195f773e42b76912"
Nov 24 14:37:24 crc kubenswrapper[5039]: I1124 14:37:24.223366 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10d7351c34786016dbb86a9243c588ba16cdb9c1255570f6195f773e42b76912"} err="failed to get container status \"10d7351c34786016dbb86a9243c588ba16cdb9c1255570f6195f773e42b76912\": rpc error: code = NotFound desc = could not find container \"10d7351c34786016dbb86a9243c588ba16cdb9c1255570f6195f773e42b76912\": container with ID starting with 10d7351c34786016dbb86a9243c588ba16cdb9c1255570f6195f773e42b76912 not found: ID does not exist"
Nov 24 14:37:24 crc kubenswrapper[5039]: I1124 14:37:24.223402 5039 scope.go:117] "RemoveContainer" containerID="3f791c077f37866b13efe314a1386e9ff6240c72d254d3218cffe5edd6bed8d3"
Nov 24 14:37:24 crc kubenswrapper[5039]: E1124 14:37:24.223826 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f791c077f37866b13efe314a1386e9ff6240c72d254d3218cffe5edd6bed8d3\": container with ID starting with 3f791c077f37866b13efe314a1386e9ff6240c72d254d3218cffe5edd6bed8d3 not found: ID does not exist" containerID="3f791c077f37866b13efe314a1386e9ff6240c72d254d3218cffe5edd6bed8d3"
Nov 24 14:37:24 crc kubenswrapper[5039]: I1124 14:37:24.223876 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f791c077f37866b13efe314a1386e9ff6240c72d254d3218cffe5edd6bed8d3"} err="failed to get container status \"3f791c077f37866b13efe314a1386e9ff6240c72d254d3218cffe5edd6bed8d3\": rpc error: code = NotFound desc = could not find container \"3f791c077f37866b13efe314a1386e9ff6240c72d254d3218cffe5edd6bed8d3\": container with ID starting with 3f791c077f37866b13efe314a1386e9ff6240c72d254d3218cffe5edd6bed8d3 not found: ID does not exist"
Nov 24 14:37:24 crc kubenswrapper[5039]: I1124 14:37:24.223905 5039 scope.go:117] "RemoveContainer" containerID="9c5b3fce2f98c2fed9217beb6c73ed331309cd6ac9fbe751a4c0d03dc472fe07"
Nov 24 14:37:24 crc kubenswrapper[5039]: E1124 14:37:24.224225 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c5b3fce2f98c2fed9217beb6c73ed331309cd6ac9fbe751a4c0d03dc472fe07\": container with ID starting with 9c5b3fce2f98c2fed9217beb6c73ed331309cd6ac9fbe751a4c0d03dc472fe07 not found: ID does not exist" containerID="9c5b3fce2f98c2fed9217beb6c73ed331309cd6ac9fbe751a4c0d03dc472fe07"
Nov 24 14:37:24 crc kubenswrapper[5039]: I1124 14:37:24.224257 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c5b3fce2f98c2fed9217beb6c73ed331309cd6ac9fbe751a4c0d03dc472fe07"} err="failed to get container status \"9c5b3fce2f98c2fed9217beb6c73ed331309cd6ac9fbe751a4c0d03dc472fe07\": rpc error: code = NotFound desc = could not find container \"9c5b3fce2f98c2fed9217beb6c73ed331309cd6ac9fbe751a4c0d03dc472fe07\": container with ID starting with 9c5b3fce2f98c2fed9217beb6c73ed331309cd6ac9fbe751a4c0d03dc472fe07 not found: ID does not exist"
Nov 24 14:37:24 crc kubenswrapper[5039]: I1124 14:37:24.318654 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711" path="/var/lib/kubelet/pods/e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711/volumes"
Nov 24 14:37:50 crc kubenswrapper[5039]: I1124 14:37:50.101808 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 14:37:50 crc kubenswrapper[5039]: I1124 14:37:50.102335 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 14:37:50 crc kubenswrapper[5039]: I1124 14:37:50.102393 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg"
Nov 24 14:37:50 crc kubenswrapper[5039]: I1124 14:37:50.103203 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"08fa3c908c9993cba7053804a49fe91058545f3752e7c82b0d480696af9568a6"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 24 14:37:50 crc kubenswrapper[5039]: I1124 14:37:50.103257 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://08fa3c908c9993cba7053804a49fe91058545f3752e7c82b0d480696af9568a6" gracePeriod=600
Nov 24 14:37:50 crc kubenswrapper[5039]: I1124 14:37:50.425071 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="08fa3c908c9993cba7053804a49fe91058545f3752e7c82b0d480696af9568a6" exitCode=0
Nov 24 14:37:50 crc kubenswrapper[5039]: I1124 14:37:50.425108 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"08fa3c908c9993cba7053804a49fe91058545f3752e7c82b0d480696af9568a6"}
Nov 24 14:37:50 crc kubenswrapper[5039]: I1124 14:37:50.425520 5039 scope.go:117] "RemoveContainer" containerID="e8e75c1f217d6a4392b7de00b0167726c6f7bf2f34f5b5d27ed1e4a8b1c64e40"
Nov 24 14:37:51 crc kubenswrapper[5039]: I1124 14:37:51.439485 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa"}
Nov 24 14:39:46 crc kubenswrapper[5039]: I1124 14:39:46.078337 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-create-9lfjc"]
Nov 24 14:39:46 crc kubenswrapper[5039]: I1124 14:39:46.091801 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-7fe4-account-create-sp5d2"]
Nov 24 14:39:46 crc kubenswrapper[5039]: I1124 14:39:46.107233 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-7fe4-account-create-sp5d2"]
Nov 24 14:39:46 crc kubenswrapper[5039]: I1124 14:39:46.119656 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-create-9lfjc"]
Nov 24 14:39:46 crc kubenswrapper[5039]: I1124 14:39:46.329858 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b67cb20-74e6-4bfe-b117-b86937dbd140" path="/var/lib/kubelet/pods/2b67cb20-74e6-4bfe-b117-b86937dbd140/volumes"
Nov 24 14:39:46 crc kubenswrapper[5039]: I1124 14:39:46.331288 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="943f0212-e353-41e8-9c0c-e1f1dc5d2649" path="/var/lib/kubelet/pods/943f0212-e353-41e8-9c0c-e1f1dc5d2649/volumes"
Nov 24 14:39:50 crc kubenswrapper[5039]: I1124 14:39:50.101135 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 14:39:50 crc kubenswrapper[5039]: I1124 14:39:50.101695 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 14:39:51 crc kubenswrapper[5039]: E1124 14:39:51.714188 5039 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.175:52038->38.102.83.175:41425: read tcp 38.102.83.175:52038->38.102.83.175:41425: read: connection reset by peer
Nov 24 14:39:51 crc kubenswrapper[5039]: E1124 14:39:51.714621 5039 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.175:52038->38.102.83.175:41425: write tcp 38.102.83.175:52038->38.102.83.175:41425: write: broken pipe
Nov 24 14:39:55 crc kubenswrapper[5039]: I1124 14:39:55.094622 5039 scope.go:117] "RemoveContainer" containerID="6ccc40f71c97f856119f4dd617bd49620c86b8b462b5d87e26fc3ab3bcb61a10"
Nov 24 14:39:55 crc kubenswrapper[5039]: I1124 14:39:55.124744 5039 scope.go:117] "RemoveContainer" containerID="ad160731468b1f2b18827d3ef5783f8094328d2d0e0690b80cd407cbf7365968"
Nov 24 14:40:16 crc kubenswrapper[5039]: I1124 14:40:16.041609 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-sync-2th7h"]
Nov 24 14:40:16 crc kubenswrapper[5039]: I1124 14:40:16.053225 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-sync-2th7h"]
Nov 24 14:40:16 crc kubenswrapper[5039]: I1124 14:40:16.317767 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42960250-7fd2-4db6-9670-dfbe653c2713" path="/var/lib/kubelet/pods/42960250-7fd2-4db6-9670-dfbe653c2713/volumes"
Nov 24 14:40:20 crc kubenswrapper[5039]: I1124 14:40:20.101736 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 14:40:20 crc kubenswrapper[5039]: I1124 14:40:20.102241 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 14:40:50 crc kubenswrapper[5039]: I1124 14:40:50.101141 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 14:40:50 crc kubenswrapper[5039]: I1124 14:40:50.101770 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 14:40:50 crc kubenswrapper[5039]: I1124 14:40:50.101827 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg"
Nov 24 14:40:50 crc kubenswrapper[5039]: I1124 14:40:50.102713 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 24 14:40:50 crc kubenswrapper[5039]: I1124 14:40:50.102770 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa" gracePeriod=600
Nov 24 14:40:50 crc kubenswrapper[5039]: E1124 14:40:50.230333 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg"
podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:40:50 crc kubenswrapper[5039]: I1124 14:40:50.587636 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa" exitCode=0 Nov 24 14:40:50 crc kubenswrapper[5039]: I1124 14:40:50.587985 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa"} Nov 24 14:40:50 crc kubenswrapper[5039]: I1124 14:40:50.588021 5039 scope.go:117] "RemoveContainer" containerID="08fa3c908c9993cba7053804a49fe91058545f3752e7c82b0d480696af9568a6" Nov 24 14:40:50 crc kubenswrapper[5039]: I1124 14:40:50.588766 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa" Nov 24 14:40:50 crc kubenswrapper[5039]: E1124 14:40:50.589040 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:40:55 crc kubenswrapper[5039]: I1124 14:40:55.255983 5039 scope.go:117] "RemoveContainer" containerID="e097a0bd56c67847fa002a9d01e01f3945f804fe690abdda16c508927ebb3b10" Nov 24 14:41:01 crc kubenswrapper[5039]: I1124 14:41:01.307482 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa" Nov 24 14:41:01 crc kubenswrapper[5039]: E1124 14:41:01.308412 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:41:12 crc kubenswrapper[5039]: I1124 14:41:12.309005 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa" Nov 24 14:41:12 crc kubenswrapper[5039]: E1124 14:41:12.310110 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:41:25 crc kubenswrapper[5039]: I1124 14:41:25.307533 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa" Nov 24 14:41:25 crc kubenswrapper[5039]: E1124 14:41:25.308406 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:41:39 crc kubenswrapper[5039]: I1124 14:41:39.306993 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa" Nov 24 14:41:39 crc kubenswrapper[5039]: E1124 14:41:39.308057 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:41:52 crc kubenswrapper[5039]: I1124 14:41:52.307764 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa" Nov 24 14:41:52 crc kubenswrapper[5039]: E1124 14:41:52.308844 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:42:03 crc kubenswrapper[5039]: I1124 14:42:03.307577 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa" Nov 24 14:42:03 crc kubenswrapper[5039]: E1124 14:42:03.308286 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:42:14 crc kubenswrapper[5039]: I1124 14:42:14.307533 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa" Nov 24 14:42:14 crc kubenswrapper[5039]: E1124 14:42:14.308324 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:42:15 crc kubenswrapper[5039]: I1124 14:42:15.239298 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-27h4j"] Nov 24 14:42:15 crc kubenswrapper[5039]: E1124 14:42:15.239859 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711" containerName="extract-content" Nov 24 14:42:15 crc kubenswrapper[5039]: I1124 14:42:15.239881 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711" containerName="extract-content" Nov 24 14:42:15 crc kubenswrapper[5039]: E1124 14:42:15.239977 5039 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711" containerName="registry-server" Nov 24 14:42:15 crc kubenswrapper[5039]: I1124 14:42:15.239989 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711" containerName="registry-server" Nov 24 14:42:15 crc kubenswrapper[5039]: E1124 14:42:15.240007 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711" containerName="extract-utilities" Nov 24 14:42:15 crc kubenswrapper[5039]: I1124 14:42:15.240015 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711" containerName="extract-utilities" Nov 24 14:42:15 crc kubenswrapper[5039]: I1124 14:42:15.240261 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6b6c3d8-0e1d-45aa-922c-d92d0bc0e711" containerName="registry-server" Nov 24 14:42:15 crc kubenswrapper[5039]: I1124 14:42:15.242183 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-27h4j" Nov 24 14:42:15 crc kubenswrapper[5039]: I1124 14:42:15.249484 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-27h4j"] Nov 24 14:42:15 crc kubenswrapper[5039]: I1124 14:42:15.320719 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cfb56e18-4b61-456f-9159-6ed73cfa0aca-catalog-content\") pod \"redhat-operators-27h4j\" (UID: \"cfb56e18-4b61-456f-9159-6ed73cfa0aca\") " pod="openshift-marketplace/redhat-operators-27h4j" Nov 24 14:42:15 crc kubenswrapper[5039]: I1124 14:42:15.320871 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cfb56e18-4b61-456f-9159-6ed73cfa0aca-utilities\") pod \"redhat-operators-27h4j\" (UID: \"cfb56e18-4b61-456f-9159-6ed73cfa0aca\") " pod="openshift-marketplace/redhat-operators-27h4j" Nov 24 14:42:15 crc kubenswrapper[5039]: I1124 14:42:15.320925 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5hnp\" (UniqueName: \"kubernetes.io/projected/cfb56e18-4b61-456f-9159-6ed73cfa0aca-kube-api-access-p5hnp\") pod \"redhat-operators-27h4j\" (UID: \"cfb56e18-4b61-456f-9159-6ed73cfa0aca\") " pod="openshift-marketplace/redhat-operators-27h4j" Nov 24 14:42:15 crc kubenswrapper[5039]: I1124 14:42:15.424018 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cfb56e18-4b61-456f-9159-6ed73cfa0aca-utilities\") pod \"redhat-operators-27h4j\" (UID: \"cfb56e18-4b61-456f-9159-6ed73cfa0aca\") " pod="openshift-marketplace/redhat-operators-27h4j" Nov 24 14:42:15 crc kubenswrapper[5039]: I1124 14:42:15.424130 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5hnp\" (UniqueName: \"kubernetes.io/projected/cfb56e18-4b61-456f-9159-6ed73cfa0aca-kube-api-access-p5hnp\") pod \"redhat-operators-27h4j\" (UID: \"cfb56e18-4b61-456f-9159-6ed73cfa0aca\") " pod="openshift-marketplace/redhat-operators-27h4j" Nov 24 14:42:15 crc kubenswrapper[5039]: I1124 14:42:15.424293 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/cfb56e18-4b61-456f-9159-6ed73cfa0aca-catalog-content\") pod \"redhat-operators-27h4j\" (UID: \"cfb56e18-4b61-456f-9159-6ed73cfa0aca\") " pod="openshift-marketplace/redhat-operators-27h4j" Nov 24 14:42:15 crc kubenswrapper[5039]: I1124 14:42:15.424668 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cfb56e18-4b61-456f-9159-6ed73cfa0aca-utilities\") pod \"redhat-operators-27h4j\" (UID: \"cfb56e18-4b61-456f-9159-6ed73cfa0aca\") " pod="openshift-marketplace/redhat-operators-27h4j" Nov 24 14:42:15 crc kubenswrapper[5039]: I1124 14:42:15.424698 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cfb56e18-4b61-456f-9159-6ed73cfa0aca-catalog-content\") pod \"redhat-operators-27h4j\" (UID: \"cfb56e18-4b61-456f-9159-6ed73cfa0aca\") " pod="openshift-marketplace/redhat-operators-27h4j" Nov 24 14:42:15 crc kubenswrapper[5039]: I1124 14:42:15.579256 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5hnp\" (UniqueName: \"kubernetes.io/projected/cfb56e18-4b61-456f-9159-6ed73cfa0aca-kube-api-access-p5hnp\") pod \"redhat-operators-27h4j\" (UID: \"cfb56e18-4b61-456f-9159-6ed73cfa0aca\") " pod="openshift-marketplace/redhat-operators-27h4j" Nov 24 14:42:15 crc kubenswrapper[5039]: I1124 14:42:15.875776 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-27h4j" Nov 24 14:42:16 crc kubenswrapper[5039]: I1124 14:42:16.414375 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-27h4j"] Nov 24 14:42:16 crc kubenswrapper[5039]: I1124 14:42:16.649082 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-27h4j" event={"ID":"cfb56e18-4b61-456f-9159-6ed73cfa0aca","Type":"ContainerStarted","Data":"18f0c6581dcf3cbc6ba673ffd56173fba468fed72eaec007c61aafdc83097562"} Nov 24 14:42:16 crc kubenswrapper[5039]: I1124 14:42:16.649131 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-27h4j" event={"ID":"cfb56e18-4b61-456f-9159-6ed73cfa0aca","Type":"ContainerStarted","Data":"41d4228b643b3cd5d7e02425dc07e6886ace06670c7e37f0bb4e45bd5c4d3ed3"} Nov 24 14:42:17 crc kubenswrapper[5039]: I1124 14:42:17.660543 5039 generic.go:334] "Generic (PLEG): container finished" podID="cfb56e18-4b61-456f-9159-6ed73cfa0aca" containerID="18f0c6581dcf3cbc6ba673ffd56173fba468fed72eaec007c61aafdc83097562" exitCode=0 Nov 24 14:42:17 crc kubenswrapper[5039]: I1124 14:42:17.660688 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-27h4j" event={"ID":"cfb56e18-4b61-456f-9159-6ed73cfa0aca","Type":"ContainerDied","Data":"18f0c6581dcf3cbc6ba673ffd56173fba468fed72eaec007c61aafdc83097562"} Nov 24 14:42:17 crc kubenswrapper[5039]: I1124 14:42:17.662493 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 14:42:19 crc kubenswrapper[5039]: I1124 14:42:19.684023 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-27h4j" event={"ID":"cfb56e18-4b61-456f-9159-6ed73cfa0aca","Type":"ContainerStarted","Data":"d0a7085c56a4c258b699b75fc136d03e7bfa2a8485159aa38586c8616397ea53"} Nov 24 14:42:22 crc kubenswrapper[5039]: I1124 14:42:22.748170 5039 generic.go:334] "Generic (PLEG): container finished" 
podID="cfb56e18-4b61-456f-9159-6ed73cfa0aca" containerID="d0a7085c56a4c258b699b75fc136d03e7bfa2a8485159aa38586c8616397ea53" exitCode=0 Nov 24 14:42:22 crc kubenswrapper[5039]: I1124 14:42:22.748257 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-27h4j" event={"ID":"cfb56e18-4b61-456f-9159-6ed73cfa0aca","Type":"ContainerDied","Data":"d0a7085c56a4c258b699b75fc136d03e7bfa2a8485159aa38586c8616397ea53"} Nov 24 14:42:23 crc kubenswrapper[5039]: I1124 14:42:23.761048 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-27h4j" event={"ID":"cfb56e18-4b61-456f-9159-6ed73cfa0aca","Type":"ContainerStarted","Data":"17f848c98dd5f02f6ad66ebff581baa64c5140c01843e95c72e106667b55e057"} Nov 24 14:42:23 crc kubenswrapper[5039]: I1124 14:42:23.788291 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-27h4j" podStartSLOduration=3.3004027049999998 podStartE2EDuration="8.788261503s" podCreationTimestamp="2025-11-24 14:42:15 +0000 UTC" firstStartedPulling="2025-11-24 14:42:17.662267954 +0000 UTC m=+5050.101392454" lastFinishedPulling="2025-11-24 14:42:23.150126752 +0000 UTC m=+5055.589251252" observedRunningTime="2025-11-24 14:42:23.786145141 +0000 UTC m=+5056.225269641" watchObservedRunningTime="2025-11-24 14:42:23.788261503 +0000 UTC m=+5056.227386023" Nov 24 14:42:25 crc kubenswrapper[5039]: I1124 14:42:25.876446 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-27h4j" Nov 24 14:42:25 crc kubenswrapper[5039]: I1124 14:42:25.876955 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-27h4j" Nov 24 14:42:26 crc kubenswrapper[5039]: I1124 14:42:26.930136 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-27h4j" podUID="cfb56e18-4b61-456f-9159-6ed73cfa0aca" containerName="registry-server" probeResult="failure" output=< Nov 24 14:42:26 crc kubenswrapper[5039]: timeout: failed to connect service ":50051" within 1s Nov 24 14:42:26 crc kubenswrapper[5039]: > Nov 24 14:42:28 crc kubenswrapper[5039]: I1124 14:42:28.314045 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa" Nov 24 14:42:28 crc kubenswrapper[5039]: E1124 14:42:28.314686 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:42:36 crc kubenswrapper[5039]: I1124 14:42:36.922931 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-27h4j" podUID="cfb56e18-4b61-456f-9159-6ed73cfa0aca" containerName="registry-server" probeResult="failure" output=< Nov 24 14:42:36 crc kubenswrapper[5039]: timeout: failed to connect service ":50051" within 1s Nov 24 14:42:36 crc kubenswrapper[5039]: > Nov 24 14:42:41 crc kubenswrapper[5039]: I1124 14:42:41.307392 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa" Nov 24 14:42:41 crc kubenswrapper[5039]: E1124 14:42:41.310960 5039 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:42:46 crc kubenswrapper[5039]: I1124 14:42:46.938996 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-27h4j" podUID="cfb56e18-4b61-456f-9159-6ed73cfa0aca" containerName="registry-server" probeResult="failure" output=< Nov 24 14:42:46 crc kubenswrapper[5039]: timeout: failed to connect service ":50051" within 1s Nov 24 14:42:46 crc kubenswrapper[5039]: > Nov 24 14:42:55 crc kubenswrapper[5039]: I1124 14:42:55.306863 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa" Nov 24 14:42:55 crc kubenswrapper[5039]: E1124 14:42:55.307654 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:42:55 crc kubenswrapper[5039]: I1124 14:42:55.934187 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-27h4j" Nov 24 14:42:55 crc kubenswrapper[5039]: I1124 14:42:55.991710 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-27h4j" Nov 24 14:42:57 crc kubenswrapper[5039]: I1124 14:42:57.100967 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-27h4j"] Nov 24 14:42:57 crc kubenswrapper[5039]: I1124 14:42:57.101390 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-27h4j" podUID="cfb56e18-4b61-456f-9159-6ed73cfa0aca" containerName="registry-server" containerID="cri-o://17f848c98dd5f02f6ad66ebff581baa64c5140c01843e95c72e106667b55e057" gracePeriod=2 Nov 24 14:42:57 crc kubenswrapper[5039]: I1124 14:42:57.704211 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-27h4j" Nov 24 14:42:57 crc kubenswrapper[5039]: I1124 14:42:57.840440 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cfb56e18-4b61-456f-9159-6ed73cfa0aca-utilities\") pod \"cfb56e18-4b61-456f-9159-6ed73cfa0aca\" (UID: \"cfb56e18-4b61-456f-9159-6ed73cfa0aca\") " Nov 24 14:42:57 crc kubenswrapper[5039]: I1124 14:42:57.840829 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p5hnp\" (UniqueName: \"kubernetes.io/projected/cfb56e18-4b61-456f-9159-6ed73cfa0aca-kube-api-access-p5hnp\") pod \"cfb56e18-4b61-456f-9159-6ed73cfa0aca\" (UID: \"cfb56e18-4b61-456f-9159-6ed73cfa0aca\") " Nov 24 14:42:57 crc kubenswrapper[5039]: I1124 14:42:57.841064 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cfb56e18-4b61-456f-9159-6ed73cfa0aca-catalog-content\") pod \"cfb56e18-4b61-456f-9159-6ed73cfa0aca\" (UID: \"cfb56e18-4b61-456f-9159-6ed73cfa0aca\") " Nov 24 14:42:57 crc kubenswrapper[5039]: I1124 14:42:57.841456 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cfb56e18-4b61-456f-9159-6ed73cfa0aca-utilities" (OuterVolumeSpecName: "utilities") pod "cfb56e18-4b61-456f-9159-6ed73cfa0aca" (UID: "cfb56e18-4b61-456f-9159-6ed73cfa0aca"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:42:57 crc kubenswrapper[5039]: I1124 14:42:57.841792 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cfb56e18-4b61-456f-9159-6ed73cfa0aca-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 14:42:57 crc kubenswrapper[5039]: I1124 14:42:57.849055 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfb56e18-4b61-456f-9159-6ed73cfa0aca-kube-api-access-p5hnp" (OuterVolumeSpecName: "kube-api-access-p5hnp") pod "cfb56e18-4b61-456f-9159-6ed73cfa0aca" (UID: "cfb56e18-4b61-456f-9159-6ed73cfa0aca"). InnerVolumeSpecName "kube-api-access-p5hnp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:42:57 crc kubenswrapper[5039]: I1124 14:42:57.932458 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cfb56e18-4b61-456f-9159-6ed73cfa0aca-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cfb56e18-4b61-456f-9159-6ed73cfa0aca" (UID: "cfb56e18-4b61-456f-9159-6ed73cfa0aca"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:42:57 crc kubenswrapper[5039]: I1124 14:42:57.944252 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p5hnp\" (UniqueName: \"kubernetes.io/projected/cfb56e18-4b61-456f-9159-6ed73cfa0aca-kube-api-access-p5hnp\") on node \"crc\" DevicePath \"\"" Nov 24 14:42:57 crc kubenswrapper[5039]: I1124 14:42:57.944291 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cfb56e18-4b61-456f-9159-6ed73cfa0aca-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 14:42:58 crc kubenswrapper[5039]: I1124 14:42:58.115907 5039 generic.go:334] "Generic (PLEG): container finished" podID="cfb56e18-4b61-456f-9159-6ed73cfa0aca" containerID="17f848c98dd5f02f6ad66ebff581baa64c5140c01843e95c72e106667b55e057" exitCode=0 Nov 24 14:42:58 crc kubenswrapper[5039]: I1124 14:42:58.115952 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-27h4j" Nov 24 14:42:58 crc kubenswrapper[5039]: I1124 14:42:58.115957 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-27h4j" event={"ID":"cfb56e18-4b61-456f-9159-6ed73cfa0aca","Type":"ContainerDied","Data":"17f848c98dd5f02f6ad66ebff581baa64c5140c01843e95c72e106667b55e057"} Nov 24 14:42:58 crc kubenswrapper[5039]: I1124 14:42:58.115986 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-27h4j" event={"ID":"cfb56e18-4b61-456f-9159-6ed73cfa0aca","Type":"ContainerDied","Data":"41d4228b643b3cd5d7e02425dc07e6886ace06670c7e37f0bb4e45bd5c4d3ed3"} Nov 24 14:42:58 crc kubenswrapper[5039]: I1124 14:42:58.116005 5039 scope.go:117] "RemoveContainer" containerID="17f848c98dd5f02f6ad66ebff581baa64c5140c01843e95c72e106667b55e057" Nov 24 14:42:58 crc kubenswrapper[5039]: I1124 14:42:58.156025 5039 scope.go:117] "RemoveContainer" containerID="d0a7085c56a4c258b699b75fc136d03e7bfa2a8485159aa38586c8616397ea53" Nov 24 14:42:58 crc kubenswrapper[5039]: I1124 14:42:58.159601 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-27h4j"] Nov 24 14:42:58 crc kubenswrapper[5039]: I1124 14:42:58.170014 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-27h4j"] Nov 24 14:42:58 crc kubenswrapper[5039]: I1124 14:42:58.182058 5039 scope.go:117] "RemoveContainer" containerID="18f0c6581dcf3cbc6ba673ffd56173fba468fed72eaec007c61aafdc83097562" Nov 24 14:42:58 crc kubenswrapper[5039]: I1124 14:42:58.238024 5039 scope.go:117] "RemoveContainer" containerID="17f848c98dd5f02f6ad66ebff581baa64c5140c01843e95c72e106667b55e057" Nov 24 14:42:58 crc kubenswrapper[5039]: E1124 14:42:58.238490 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17f848c98dd5f02f6ad66ebff581baa64c5140c01843e95c72e106667b55e057\": container with ID starting with 17f848c98dd5f02f6ad66ebff581baa64c5140c01843e95c72e106667b55e057 not found: ID does not exist" containerID="17f848c98dd5f02f6ad66ebff581baa64c5140c01843e95c72e106667b55e057" Nov 24 14:42:58 crc kubenswrapper[5039]: I1124 14:42:58.238548 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17f848c98dd5f02f6ad66ebff581baa64c5140c01843e95c72e106667b55e057"} err="failed to get container status \"17f848c98dd5f02f6ad66ebff581baa64c5140c01843e95c72e106667b55e057\": 
Nov 24 14:42:58 crc kubenswrapper[5039]: I1124 14:42:58.238575 5039 scope.go:117] "RemoveContainer" containerID="d0a7085c56a4c258b699b75fc136d03e7bfa2a8485159aa38586c8616397ea53"
Nov 24 14:42:58 crc kubenswrapper[5039]: E1124 14:42:58.239072 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d0a7085c56a4c258b699b75fc136d03e7bfa2a8485159aa38586c8616397ea53\": container with ID starting with d0a7085c56a4c258b699b75fc136d03e7bfa2a8485159aa38586c8616397ea53 not found: ID does not exist" containerID="d0a7085c56a4c258b699b75fc136d03e7bfa2a8485159aa38586c8616397ea53"
Nov 24 14:42:58 crc kubenswrapper[5039]: I1124 14:42:58.239127 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0a7085c56a4c258b699b75fc136d03e7bfa2a8485159aa38586c8616397ea53"} err="failed to get container status \"d0a7085c56a4c258b699b75fc136d03e7bfa2a8485159aa38586c8616397ea53\": rpc error: code = NotFound desc = could not find container \"d0a7085c56a4c258b699b75fc136d03e7bfa2a8485159aa38586c8616397ea53\": container with ID starting with d0a7085c56a4c258b699b75fc136d03e7bfa2a8485159aa38586c8616397ea53 not found: ID does not exist"
Nov 24 14:42:58 crc kubenswrapper[5039]: I1124 14:42:58.239167 5039 scope.go:117] "RemoveContainer" containerID="18f0c6581dcf3cbc6ba673ffd56173fba468fed72eaec007c61aafdc83097562"
Nov 24 14:42:58 crc kubenswrapper[5039]: E1124 14:42:58.239619 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18f0c6581dcf3cbc6ba673ffd56173fba468fed72eaec007c61aafdc83097562\": container with ID starting with 18f0c6581dcf3cbc6ba673ffd56173fba468fed72eaec007c61aafdc83097562 not found: ID does not exist" containerID="18f0c6581dcf3cbc6ba673ffd56173fba468fed72eaec007c61aafdc83097562"
Nov 24 14:42:58 crc kubenswrapper[5039]: I1124 14:42:58.239644 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18f0c6581dcf3cbc6ba673ffd56173fba468fed72eaec007c61aafdc83097562"} err="failed to get container status \"18f0c6581dcf3cbc6ba673ffd56173fba468fed72eaec007c61aafdc83097562\": rpc error: code = NotFound desc = could not find container \"18f0c6581dcf3cbc6ba673ffd56173fba468fed72eaec007c61aafdc83097562\": container with ID starting with 18f0c6581dcf3cbc6ba673ffd56173fba468fed72eaec007c61aafdc83097562 not found: ID does not exist"
Nov 24 14:42:58 crc kubenswrapper[5039]: I1124 14:42:58.320327 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cfb56e18-4b61-456f-9159-6ed73cfa0aca" path="/var/lib/kubelet/pods/cfb56e18-4b61-456f-9159-6ed73cfa0aca/volumes"
Nov 24 14:43:09 crc kubenswrapper[5039]: I1124 14:43:09.306643 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa"
Nov 24 14:43:09 crc kubenswrapper[5039]: E1124 14:43:09.307343 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:43:14 crc kubenswrapper[5039]: E1124 14:43:14.516734 5039 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.175:42562->38.102.83.175:41425: write tcp 38.102.83.175:42562->38.102.83.175:41425: write: broken pipe
Nov 24 14:43:22 crc kubenswrapper[5039]: I1124 14:43:22.307196 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa"
Nov 24 14:43:22 crc kubenswrapper[5039]: E1124 14:43:22.308476 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:43:36 crc kubenswrapper[5039]: I1124 14:43:36.306720 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa"
Nov 24 14:43:36 crc kubenswrapper[5039]: E1124 14:43:36.307738 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:43:47 crc kubenswrapper[5039]: I1124 14:43:47.306713 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa"
Nov 24 14:43:47 crc kubenswrapper[5039]: E1124 14:43:47.307701 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:43:59 crc kubenswrapper[5039]: I1124 14:43:59.307765 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa"
Nov 24 14:43:59 crc kubenswrapper[5039]: E1124 14:43:59.309018 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:44:13 crc kubenswrapper[5039]: I1124 14:44:13.306366 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa"
Nov 24 14:44:13 crc kubenswrapper[5039]: E1124 14:44:13.310270 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:44:26 crc kubenswrapper[5039]: I1124 14:44:26.307491 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa"
Nov 24 14:44:26 crc kubenswrapper[5039]: E1124 14:44:26.308455 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:44:38 crc kubenswrapper[5039]: I1124 14:44:38.318367 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa"
Nov 24 14:44:38 crc kubenswrapper[5039]: E1124 14:44:38.319351 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:44:49 crc kubenswrapper[5039]: I1124 14:44:49.307498 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa"
Nov 24 14:44:49 crc kubenswrapper[5039]: E1124 14:44:49.308722 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:45:00 crc kubenswrapper[5039]: I1124 14:45:00.168466 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6"]
Nov 24 14:45:00 crc kubenswrapper[5039]: E1124 14:45:00.169593 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfb56e18-4b61-456f-9159-6ed73cfa0aca" containerName="extract-utilities"
Nov 24 14:45:00 crc kubenswrapper[5039]: I1124 14:45:00.169610 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfb56e18-4b61-456f-9159-6ed73cfa0aca" containerName="extract-utilities"
Nov 24 14:45:00 crc kubenswrapper[5039]: E1124 14:45:00.169630 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfb56e18-4b61-456f-9159-6ed73cfa0aca" containerName="registry-server"
Nov 24 14:45:00 crc kubenswrapper[5039]: I1124 14:45:00.169638 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfb56e18-4b61-456f-9159-6ed73cfa0aca" containerName="registry-server"
Nov 24 14:45:00 crc kubenswrapper[5039]: E1124 14:45:00.169662 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfb56e18-4b61-456f-9159-6ed73cfa0aca" containerName="extract-content"
Nov 24 14:45:00 crc kubenswrapper[5039]: I1124 14:45:00.169672 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfb56e18-4b61-456f-9159-6ed73cfa0aca" containerName="extract-content"
Nov 24 14:45:00 crc kubenswrapper[5039]: I1124 14:45:00.169940 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfb56e18-4b61-456f-9159-6ed73cfa0aca" containerName="registry-server"
Nov 24 14:45:00 crc kubenswrapper[5039]: I1124 14:45:00.170940 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6"
Nov 24 14:45:00 crc kubenswrapper[5039]: I1124 14:45:00.173106 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 24 14:45:00 crc kubenswrapper[5039]: I1124 14:45:00.174673 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 24 14:45:00 crc kubenswrapper[5039]: I1124 14:45:00.186633 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6"]
Nov 24 14:45:00 crc kubenswrapper[5039]: I1124 14:45:00.327774 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dllr9\" (UniqueName: \"kubernetes.io/projected/251b2e5c-beac-4081-ba13-c9c21e99d0af-kube-api-access-dllr9\") pod \"collect-profiles-29399925-xcwb6\" (UID: \"251b2e5c-beac-4081-ba13-c9c21e99d0af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6"
Nov 24 14:45:00 crc kubenswrapper[5039]: I1124 14:45:00.327966 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/251b2e5c-beac-4081-ba13-c9c21e99d0af-config-volume\") pod \"collect-profiles-29399925-xcwb6\" (UID: \"251b2e5c-beac-4081-ba13-c9c21e99d0af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6"
Nov 24 14:45:00 crc kubenswrapper[5039]: I1124 14:45:00.328050 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/251b2e5c-beac-4081-ba13-c9c21e99d0af-secret-volume\") pod \"collect-profiles-29399925-xcwb6\" (UID: \"251b2e5c-beac-4081-ba13-c9c21e99d0af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6"
Nov 24 14:45:00 crc kubenswrapper[5039]: I1124 14:45:00.430221 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dllr9\" (UniqueName: \"kubernetes.io/projected/251b2e5c-beac-4081-ba13-c9c21e99d0af-kube-api-access-dllr9\") pod \"collect-profiles-29399925-xcwb6\" (UID: \"251b2e5c-beac-4081-ba13-c9c21e99d0af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6"
Nov 24 14:45:00 crc kubenswrapper[5039]: I1124 14:45:00.430351 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/251b2e5c-beac-4081-ba13-c9c21e99d0af-config-volume\") pod \"collect-profiles-29399925-xcwb6\" (UID: \"251b2e5c-beac-4081-ba13-c9c21e99d0af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6"
Nov 24 14:45:00 crc kubenswrapper[5039]: I1124 14:45:00.430407 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/251b2e5c-beac-4081-ba13-c9c21e99d0af-secret-volume\") pod \"collect-profiles-29399925-xcwb6\" (UID: \"251b2e5c-beac-4081-ba13-c9c21e99d0af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6"
\"collect-profiles-29399925-xcwb6\" (UID: \"251b2e5c-beac-4081-ba13-c9c21e99d0af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6" Nov 24 14:45:00 crc kubenswrapper[5039]: I1124 14:45:00.433790 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/251b2e5c-beac-4081-ba13-c9c21e99d0af-config-volume\") pod \"collect-profiles-29399925-xcwb6\" (UID: \"251b2e5c-beac-4081-ba13-c9c21e99d0af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6" Nov 24 14:45:00 crc kubenswrapper[5039]: I1124 14:45:00.446348 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/251b2e5c-beac-4081-ba13-c9c21e99d0af-secret-volume\") pod \"collect-profiles-29399925-xcwb6\" (UID: \"251b2e5c-beac-4081-ba13-c9c21e99d0af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6" Nov 24 14:45:00 crc kubenswrapper[5039]: I1124 14:45:00.449674 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dllr9\" (UniqueName: \"kubernetes.io/projected/251b2e5c-beac-4081-ba13-c9c21e99d0af-kube-api-access-dllr9\") pod \"collect-profiles-29399925-xcwb6\" (UID: \"251b2e5c-beac-4081-ba13-c9c21e99d0af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6" Nov 24 14:45:00 crc kubenswrapper[5039]: I1124 14:45:00.497152 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6" Nov 24 14:45:00 crc kubenswrapper[5039]: I1124 14:45:00.983591 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6"] Nov 24 14:45:01 crc kubenswrapper[5039]: W1124 14:45:01.284206 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod251b2e5c_beac_4081_ba13_c9c21e99d0af.slice/crio-d4d89d708fd2f6013c0d94ebf94b9778dcee51c574788be7feca4aff168f0247 WatchSource:0}: Error finding container d4d89d708fd2f6013c0d94ebf94b9778dcee51c574788be7feca4aff168f0247: Status 404 returned error can't find the container with id d4d89d708fd2f6013c0d94ebf94b9778dcee51c574788be7feca4aff168f0247 Nov 24 14:45:01 crc kubenswrapper[5039]: I1124 14:45:01.478543 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6" event={"ID":"251b2e5c-beac-4081-ba13-c9c21e99d0af","Type":"ContainerStarted","Data":"d4d89d708fd2f6013c0d94ebf94b9778dcee51c574788be7feca4aff168f0247"} Nov 24 14:45:02 crc kubenswrapper[5039]: I1124 14:45:02.307829 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa" Nov 24 14:45:02 crc kubenswrapper[5039]: E1124 14:45:02.308327 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:45:02 crc kubenswrapper[5039]: I1124 14:45:02.491544 5039 generic.go:334] "Generic (PLEG): container finished" podID="251b2e5c-beac-4081-ba13-c9c21e99d0af" 
containerID="f9c555e37530ef860701043c4e3f5140f92b20ea397d6c322b77dffdd62f4782" exitCode=0 Nov 24 14:45:02 crc kubenswrapper[5039]: I1124 14:45:02.491610 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6" event={"ID":"251b2e5c-beac-4081-ba13-c9c21e99d0af","Type":"ContainerDied","Data":"f9c555e37530ef860701043c4e3f5140f92b20ea397d6c322b77dffdd62f4782"} Nov 24 14:45:04 crc kubenswrapper[5039]: I1124 14:45:04.212353 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6" Nov 24 14:45:04 crc kubenswrapper[5039]: I1124 14:45:04.326917 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/251b2e5c-beac-4081-ba13-c9c21e99d0af-config-volume\") pod \"251b2e5c-beac-4081-ba13-c9c21e99d0af\" (UID: \"251b2e5c-beac-4081-ba13-c9c21e99d0af\") " Nov 24 14:45:04 crc kubenswrapper[5039]: I1124 14:45:04.327371 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/251b2e5c-beac-4081-ba13-c9c21e99d0af-secret-volume\") pod \"251b2e5c-beac-4081-ba13-c9c21e99d0af\" (UID: \"251b2e5c-beac-4081-ba13-c9c21e99d0af\") " Nov 24 14:45:04 crc kubenswrapper[5039]: I1124 14:45:04.327556 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dllr9\" (UniqueName: \"kubernetes.io/projected/251b2e5c-beac-4081-ba13-c9c21e99d0af-kube-api-access-dllr9\") pod \"251b2e5c-beac-4081-ba13-c9c21e99d0af\" (UID: \"251b2e5c-beac-4081-ba13-c9c21e99d0af\") " Nov 24 14:45:04 crc kubenswrapper[5039]: I1124 14:45:04.328068 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/251b2e5c-beac-4081-ba13-c9c21e99d0af-config-volume" (OuterVolumeSpecName: "config-volume") pod "251b2e5c-beac-4081-ba13-c9c21e99d0af" (UID: "251b2e5c-beac-4081-ba13-c9c21e99d0af"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 14:45:04 crc kubenswrapper[5039]: I1124 14:45:04.330429 5039 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/251b2e5c-beac-4081-ba13-c9c21e99d0af-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 14:45:04 crc kubenswrapper[5039]: I1124 14:45:04.341582 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/251b2e5c-beac-4081-ba13-c9c21e99d0af-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "251b2e5c-beac-4081-ba13-c9c21e99d0af" (UID: "251b2e5c-beac-4081-ba13-c9c21e99d0af"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 14:45:04 crc kubenswrapper[5039]: I1124 14:45:04.341661 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/251b2e5c-beac-4081-ba13-c9c21e99d0af-kube-api-access-dllr9" (OuterVolumeSpecName: "kube-api-access-dllr9") pod "251b2e5c-beac-4081-ba13-c9c21e99d0af" (UID: "251b2e5c-beac-4081-ba13-c9c21e99d0af"). InnerVolumeSpecName "kube-api-access-dllr9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:45:04 crc kubenswrapper[5039]: I1124 14:45:04.432926 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dllr9\" (UniqueName: \"kubernetes.io/projected/251b2e5c-beac-4081-ba13-c9c21e99d0af-kube-api-access-dllr9\") on node \"crc\" DevicePath \"\"" Nov 24 14:45:04 crc kubenswrapper[5039]: I1124 14:45:04.432955 5039 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/251b2e5c-beac-4081-ba13-c9c21e99d0af-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 24 14:45:04 crc kubenswrapper[5039]: I1124 14:45:04.520304 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6" event={"ID":"251b2e5c-beac-4081-ba13-c9c21e99d0af","Type":"ContainerDied","Data":"d4d89d708fd2f6013c0d94ebf94b9778dcee51c574788be7feca4aff168f0247"} Nov 24 14:45:04 crc kubenswrapper[5039]: I1124 14:45:04.520370 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d4d89d708fd2f6013c0d94ebf94b9778dcee51c574788be7feca4aff168f0247" Nov 24 14:45:04 crc kubenswrapper[5039]: I1124 14:45:04.521415 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6" Nov 24 14:45:05 crc kubenswrapper[5039]: I1124 14:45:05.307049 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm"] Nov 24 14:45:05 crc kubenswrapper[5039]: I1124 14:45:05.321040 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399880-2nkfm"] Nov 24 14:45:06 crc kubenswrapper[5039]: I1124 14:45:06.321840 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6828d14-20ef-48a1-91cb-c2c3e43744f0" path="/var/lib/kubelet/pods/b6828d14-20ef-48a1-91cb-c2c3e43744f0/volumes" Nov 24 14:45:15 crc kubenswrapper[5039]: I1124 14:45:15.307029 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa" Nov 24 14:45:15 crc kubenswrapper[5039]: E1124 14:45:15.308019 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:45:30 crc kubenswrapper[5039]: I1124 14:45:30.308345 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa" Nov 24 14:45:30 crc kubenswrapper[5039]: E1124 14:45:30.309878 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:45:39 crc kubenswrapper[5039]: I1124 14:45:39.221031 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fmf2d"] Nov 24 14:45:39 crc 
kubenswrapper[5039]: E1124 14:45:39.222673 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="251b2e5c-beac-4081-ba13-c9c21e99d0af" containerName="collect-profiles" Nov 24 14:45:39 crc kubenswrapper[5039]: I1124 14:45:39.222709 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="251b2e5c-beac-4081-ba13-c9c21e99d0af" containerName="collect-profiles" Nov 24 14:45:39 crc kubenswrapper[5039]: I1124 14:45:39.223332 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="251b2e5c-beac-4081-ba13-c9c21e99d0af" containerName="collect-profiles" Nov 24 14:45:39 crc kubenswrapper[5039]: I1124 14:45:39.227697 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fmf2d" Nov 24 14:45:39 crc kubenswrapper[5039]: I1124 14:45:39.240417 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fmf2d"] Nov 24 14:45:39 crc kubenswrapper[5039]: I1124 14:45:39.252188 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b15260da-4593-448e-b3ef-9596c344a911-catalog-content\") pod \"community-operators-fmf2d\" (UID: \"b15260da-4593-448e-b3ef-9596c344a911\") " pod="openshift-marketplace/community-operators-fmf2d" Nov 24 14:45:39 crc kubenswrapper[5039]: I1124 14:45:39.252233 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhfzt\" (UniqueName: \"kubernetes.io/projected/b15260da-4593-448e-b3ef-9596c344a911-kube-api-access-jhfzt\") pod \"community-operators-fmf2d\" (UID: \"b15260da-4593-448e-b3ef-9596c344a911\") " pod="openshift-marketplace/community-operators-fmf2d" Nov 24 14:45:39 crc kubenswrapper[5039]: I1124 14:45:39.252380 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b15260da-4593-448e-b3ef-9596c344a911-utilities\") pod \"community-operators-fmf2d\" (UID: \"b15260da-4593-448e-b3ef-9596c344a911\") " pod="openshift-marketplace/community-operators-fmf2d" Nov 24 14:45:39 crc kubenswrapper[5039]: I1124 14:45:39.354942 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b15260da-4593-448e-b3ef-9596c344a911-catalog-content\") pod \"community-operators-fmf2d\" (UID: \"b15260da-4593-448e-b3ef-9596c344a911\") " pod="openshift-marketplace/community-operators-fmf2d" Nov 24 14:45:39 crc kubenswrapper[5039]: I1124 14:45:39.354996 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhfzt\" (UniqueName: \"kubernetes.io/projected/b15260da-4593-448e-b3ef-9596c344a911-kube-api-access-jhfzt\") pod \"community-operators-fmf2d\" (UID: \"b15260da-4593-448e-b3ef-9596c344a911\") " pod="openshift-marketplace/community-operators-fmf2d" Nov 24 14:45:39 crc kubenswrapper[5039]: I1124 14:45:39.355063 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b15260da-4593-448e-b3ef-9596c344a911-utilities\") pod \"community-operators-fmf2d\" (UID: \"b15260da-4593-448e-b3ef-9596c344a911\") " pod="openshift-marketplace/community-operators-fmf2d" Nov 24 14:45:39 crc kubenswrapper[5039]: I1124 14:45:39.355742 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/b15260da-4593-448e-b3ef-9596c344a911-utilities\") pod \"community-operators-fmf2d\" (UID: \"b15260da-4593-448e-b3ef-9596c344a911\") " pod="openshift-marketplace/community-operators-fmf2d" Nov 24 14:45:39 crc kubenswrapper[5039]: I1124 14:45:39.356593 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b15260da-4593-448e-b3ef-9596c344a911-catalog-content\") pod \"community-operators-fmf2d\" (UID: \"b15260da-4593-448e-b3ef-9596c344a911\") " pod="openshift-marketplace/community-operators-fmf2d" Nov 24 14:45:39 crc kubenswrapper[5039]: I1124 14:45:39.380860 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhfzt\" (UniqueName: \"kubernetes.io/projected/b15260da-4593-448e-b3ef-9596c344a911-kube-api-access-jhfzt\") pod \"community-operators-fmf2d\" (UID: \"b15260da-4593-448e-b3ef-9596c344a911\") " pod="openshift-marketplace/community-operators-fmf2d" Nov 24 14:45:39 crc kubenswrapper[5039]: I1124 14:45:39.563037 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fmf2d" Nov 24 14:45:40 crc kubenswrapper[5039]: I1124 14:45:40.187315 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fmf2d"] Nov 24 14:45:40 crc kubenswrapper[5039]: I1124 14:45:40.980263 5039 generic.go:334] "Generic (PLEG): container finished" podID="b15260da-4593-448e-b3ef-9596c344a911" containerID="8b1970035c1096cecdc06ec6c90d637561dff5e9bfeebdf72ecd4115a4b3a214" exitCode=0 Nov 24 14:45:40 crc kubenswrapper[5039]: I1124 14:45:40.980379 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fmf2d" event={"ID":"b15260da-4593-448e-b3ef-9596c344a911","Type":"ContainerDied","Data":"8b1970035c1096cecdc06ec6c90d637561dff5e9bfeebdf72ecd4115a4b3a214"} Nov 24 14:45:40 crc kubenswrapper[5039]: I1124 14:45:40.980610 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fmf2d" event={"ID":"b15260da-4593-448e-b3ef-9596c344a911","Type":"ContainerStarted","Data":"34de57a67dc1f5ce6715013597e6252ca8d35be4517e9e099d64db56c988a878"} Nov 24 14:45:42 crc kubenswrapper[5039]: I1124 14:45:42.002336 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fmf2d" event={"ID":"b15260da-4593-448e-b3ef-9596c344a911","Type":"ContainerStarted","Data":"e5a832c19c19002819bb04fe4161e19cb48eb40a7350d3d682f36ccec6111f6b"} Nov 24 14:45:42 crc kubenswrapper[5039]: I1124 14:45:42.307665 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa" Nov 24 14:45:42 crc kubenswrapper[5039]: E1124 14:45:42.308026 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:45:43 crc kubenswrapper[5039]: I1124 14:45:43.015187 5039 generic.go:334] "Generic (PLEG): container finished" podID="b15260da-4593-448e-b3ef-9596c344a911" containerID="e5a832c19c19002819bb04fe4161e19cb48eb40a7350d3d682f36ccec6111f6b" exitCode=0 Nov 24 14:45:43 crc 
kubenswrapper[5039]: I1124 14:45:43.015247 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fmf2d" event={"ID":"b15260da-4593-448e-b3ef-9596c344a911","Type":"ContainerDied","Data":"e5a832c19c19002819bb04fe4161e19cb48eb40a7350d3d682f36ccec6111f6b"} Nov 24 14:45:44 crc kubenswrapper[5039]: I1124 14:45:44.028414 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fmf2d" event={"ID":"b15260da-4593-448e-b3ef-9596c344a911","Type":"ContainerStarted","Data":"f0b928053b71a95f2d078cba4809244dfd22b14405da949d50d2be5d3b805a8b"} Nov 24 14:45:44 crc kubenswrapper[5039]: I1124 14:45:44.063896 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fmf2d" podStartSLOduration=2.6058864980000003 podStartE2EDuration="5.0638669s" podCreationTimestamp="2025-11-24 14:45:39 +0000 UTC" firstStartedPulling="2025-11-24 14:45:40.982964079 +0000 UTC m=+5253.422088589" lastFinishedPulling="2025-11-24 14:45:43.440944491 +0000 UTC m=+5255.880068991" observedRunningTime="2025-11-24 14:45:44.057423212 +0000 UTC m=+5256.496547752" watchObservedRunningTime="2025-11-24 14:45:44.0638669 +0000 UTC m=+5256.502991450" Nov 24 14:45:49 crc kubenswrapper[5039]: I1124 14:45:49.563746 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fmf2d" Nov 24 14:45:49 crc kubenswrapper[5039]: I1124 14:45:49.564383 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fmf2d" Nov 24 14:45:49 crc kubenswrapper[5039]: I1124 14:45:49.614307 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fmf2d" Nov 24 14:45:50 crc kubenswrapper[5039]: I1124 14:45:50.190405 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fmf2d" Nov 24 14:45:50 crc kubenswrapper[5039]: I1124 14:45:50.273455 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fmf2d"] Nov 24 14:45:52 crc kubenswrapper[5039]: I1124 14:45:52.119745 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fmf2d" podUID="b15260da-4593-448e-b3ef-9596c344a911" containerName="registry-server" containerID="cri-o://f0b928053b71a95f2d078cba4809244dfd22b14405da949d50d2be5d3b805a8b" gracePeriod=2 Nov 24 14:45:53 crc kubenswrapper[5039]: I1124 14:45:53.138323 5039 generic.go:334] "Generic (PLEG): container finished" podID="b15260da-4593-448e-b3ef-9596c344a911" containerID="f0b928053b71a95f2d078cba4809244dfd22b14405da949d50d2be5d3b805a8b" exitCode=0 Nov 24 14:45:53 crc kubenswrapper[5039]: I1124 14:45:53.138396 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fmf2d" event={"ID":"b15260da-4593-448e-b3ef-9596c344a911","Type":"ContainerDied","Data":"f0b928053b71a95f2d078cba4809244dfd22b14405da949d50d2be5d3b805a8b"} Nov 24 14:45:53 crc kubenswrapper[5039]: I1124 14:45:53.138447 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fmf2d" event={"ID":"b15260da-4593-448e-b3ef-9596c344a911","Type":"ContainerDied","Data":"34de57a67dc1f5ce6715013597e6252ca8d35be4517e9e099d64db56c988a878"} Nov 24 14:45:53 crc kubenswrapper[5039]: I1124 14:45:53.138468 5039 
pod_container_deletor.go:80] "Container not found in pod's containers" containerID="34de57a67dc1f5ce6715013597e6252ca8d35be4517e9e099d64db56c988a878" Nov 24 14:45:53 crc kubenswrapper[5039]: I1124 14:45:53.226814 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fmf2d" Nov 24 14:45:53 crc kubenswrapper[5039]: I1124 14:45:53.318144 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhfzt\" (UniqueName: \"kubernetes.io/projected/b15260da-4593-448e-b3ef-9596c344a911-kube-api-access-jhfzt\") pod \"b15260da-4593-448e-b3ef-9596c344a911\" (UID: \"b15260da-4593-448e-b3ef-9596c344a911\") " Nov 24 14:45:53 crc kubenswrapper[5039]: I1124 14:45:53.318852 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b15260da-4593-448e-b3ef-9596c344a911-catalog-content\") pod \"b15260da-4593-448e-b3ef-9596c344a911\" (UID: \"b15260da-4593-448e-b3ef-9596c344a911\") " Nov 24 14:45:53 crc kubenswrapper[5039]: I1124 14:45:53.318972 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b15260da-4593-448e-b3ef-9596c344a911-utilities\") pod \"b15260da-4593-448e-b3ef-9596c344a911\" (UID: \"b15260da-4593-448e-b3ef-9596c344a911\") " Nov 24 14:45:53 crc kubenswrapper[5039]: I1124 14:45:53.322986 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b15260da-4593-448e-b3ef-9596c344a911-utilities" (OuterVolumeSpecName: "utilities") pod "b15260da-4593-448e-b3ef-9596c344a911" (UID: "b15260da-4593-448e-b3ef-9596c344a911"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:45:53 crc kubenswrapper[5039]: I1124 14:45:53.332758 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b15260da-4593-448e-b3ef-9596c344a911-kube-api-access-jhfzt" (OuterVolumeSpecName: "kube-api-access-jhfzt") pod "b15260da-4593-448e-b3ef-9596c344a911" (UID: "b15260da-4593-448e-b3ef-9596c344a911"). InnerVolumeSpecName "kube-api-access-jhfzt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:45:53 crc kubenswrapper[5039]: I1124 14:45:53.399316 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b15260da-4593-448e-b3ef-9596c344a911-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b15260da-4593-448e-b3ef-9596c344a911" (UID: "b15260da-4593-448e-b3ef-9596c344a911"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:45:53 crc kubenswrapper[5039]: I1124 14:45:53.423627 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b15260da-4593-448e-b3ef-9596c344a911-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 14:45:53 crc kubenswrapper[5039]: I1124 14:45:53.423689 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b15260da-4593-448e-b3ef-9596c344a911-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 14:45:53 crc kubenswrapper[5039]: I1124 14:45:53.423705 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhfzt\" (UniqueName: \"kubernetes.io/projected/b15260da-4593-448e-b3ef-9596c344a911-kube-api-access-jhfzt\") on node \"crc\" DevicePath \"\"" Nov 24 14:45:54 crc kubenswrapper[5039]: I1124 14:45:54.148135 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fmf2d" Nov 24 14:45:54 crc kubenswrapper[5039]: I1124 14:45:54.195959 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fmf2d"] Nov 24 14:45:54 crc kubenswrapper[5039]: I1124 14:45:54.207213 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fmf2d"] Nov 24 14:45:54 crc kubenswrapper[5039]: I1124 14:45:54.307869 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa" Nov 24 14:45:54 crc kubenswrapper[5039]: I1124 14:45:54.330799 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b15260da-4593-448e-b3ef-9596c344a911" path="/var/lib/kubelet/pods/b15260da-4593-448e-b3ef-9596c344a911/volumes" Nov 24 14:45:55 crc kubenswrapper[5039]: I1124 14:45:55.160729 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"588ec00ff0baad475dbb7f43efd2ba6987e01bf02046b7c136c3ad7b91310933"} Nov 24 14:45:55 crc kubenswrapper[5039]: I1124 14:45:55.445067 5039 scope.go:117] "RemoveContainer" containerID="f8de2432faf3a95e0b2d51a2c0f17621df6e8902438b2bc833eed563f41bda23" Nov 24 14:46:45 crc kubenswrapper[5039]: I1124 14:46:45.178459 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xzdrc"] Nov 24 14:46:45 crc kubenswrapper[5039]: E1124 14:46:45.179506 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b15260da-4593-448e-b3ef-9596c344a911" containerName="registry-server" Nov 24 14:46:45 crc kubenswrapper[5039]: I1124 14:46:45.179542 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b15260da-4593-448e-b3ef-9596c344a911" containerName="registry-server" Nov 24 14:46:45 crc kubenswrapper[5039]: E1124 14:46:45.179569 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b15260da-4593-448e-b3ef-9596c344a911" containerName="extract-content" Nov 24 14:46:45 crc kubenswrapper[5039]: I1124 14:46:45.179576 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b15260da-4593-448e-b3ef-9596c344a911" containerName="extract-content" Nov 24 14:46:45 crc kubenswrapper[5039]: E1124 14:46:45.179601 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b15260da-4593-448e-b3ef-9596c344a911" containerName="extract-utilities" Nov 24 14:46:45 crc 
kubenswrapper[5039]: I1124 14:46:45.179609 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="b15260da-4593-448e-b3ef-9596c344a911" containerName="extract-utilities" Nov 24 14:46:45 crc kubenswrapper[5039]: I1124 14:46:45.179858 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="b15260da-4593-448e-b3ef-9596c344a911" containerName="registry-server" Nov 24 14:46:45 crc kubenswrapper[5039]: I1124 14:46:45.181691 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xzdrc" Nov 24 14:46:45 crc kubenswrapper[5039]: I1124 14:46:45.192174 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xzdrc"] Nov 24 14:46:45 crc kubenswrapper[5039]: I1124 14:46:45.285988 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66e8c873-694c-4025-a9d8-c74e8deb6e29-catalog-content\") pod \"redhat-marketplace-xzdrc\" (UID: \"66e8c873-694c-4025-a9d8-c74e8deb6e29\") " pod="openshift-marketplace/redhat-marketplace-xzdrc" Nov 24 14:46:45 crc kubenswrapper[5039]: I1124 14:46:45.286114 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdj2c\" (UniqueName: \"kubernetes.io/projected/66e8c873-694c-4025-a9d8-c74e8deb6e29-kube-api-access-wdj2c\") pod \"redhat-marketplace-xzdrc\" (UID: \"66e8c873-694c-4025-a9d8-c74e8deb6e29\") " pod="openshift-marketplace/redhat-marketplace-xzdrc" Nov 24 14:46:45 crc kubenswrapper[5039]: I1124 14:46:45.286199 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66e8c873-694c-4025-a9d8-c74e8deb6e29-utilities\") pod \"redhat-marketplace-xzdrc\" (UID: \"66e8c873-694c-4025-a9d8-c74e8deb6e29\") " pod="openshift-marketplace/redhat-marketplace-xzdrc" Nov 24 14:46:45 crc kubenswrapper[5039]: I1124 14:46:45.387480 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66e8c873-694c-4025-a9d8-c74e8deb6e29-catalog-content\") pod \"redhat-marketplace-xzdrc\" (UID: \"66e8c873-694c-4025-a9d8-c74e8deb6e29\") " pod="openshift-marketplace/redhat-marketplace-xzdrc" Nov 24 14:46:45 crc kubenswrapper[5039]: I1124 14:46:45.387914 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdj2c\" (UniqueName: \"kubernetes.io/projected/66e8c873-694c-4025-a9d8-c74e8deb6e29-kube-api-access-wdj2c\") pod \"redhat-marketplace-xzdrc\" (UID: \"66e8c873-694c-4025-a9d8-c74e8deb6e29\") " pod="openshift-marketplace/redhat-marketplace-xzdrc" Nov 24 14:46:45 crc kubenswrapper[5039]: I1124 14:46:45.388012 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66e8c873-694c-4025-a9d8-c74e8deb6e29-utilities\") pod \"redhat-marketplace-xzdrc\" (UID: \"66e8c873-694c-4025-a9d8-c74e8deb6e29\") " pod="openshift-marketplace/redhat-marketplace-xzdrc" Nov 24 14:46:45 crc kubenswrapper[5039]: I1124 14:46:45.388361 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66e8c873-694c-4025-a9d8-c74e8deb6e29-catalog-content\") pod \"redhat-marketplace-xzdrc\" (UID: \"66e8c873-694c-4025-a9d8-c74e8deb6e29\") " pod="openshift-marketplace/redhat-marketplace-xzdrc" 
Nov 24 14:46:45 crc kubenswrapper[5039]: I1124 14:46:45.388469 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66e8c873-694c-4025-a9d8-c74e8deb6e29-utilities\") pod \"redhat-marketplace-xzdrc\" (UID: \"66e8c873-694c-4025-a9d8-c74e8deb6e29\") " pod="openshift-marketplace/redhat-marketplace-xzdrc"
Nov 24 14:46:45 crc kubenswrapper[5039]: I1124 14:46:45.417217 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdj2c\" (UniqueName: \"kubernetes.io/projected/66e8c873-694c-4025-a9d8-c74e8deb6e29-kube-api-access-wdj2c\") pod \"redhat-marketplace-xzdrc\" (UID: \"66e8c873-694c-4025-a9d8-c74e8deb6e29\") " pod="openshift-marketplace/redhat-marketplace-xzdrc"
Nov 24 14:46:45 crc kubenswrapper[5039]: I1124 14:46:45.527809 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xzdrc"
Nov 24 14:46:46 crc kubenswrapper[5039]: I1124 14:46:46.117456 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xzdrc"]
Nov 24 14:46:46 crc kubenswrapper[5039]: I1124 14:46:46.783809 5039 generic.go:334] "Generic (PLEG): container finished" podID="66e8c873-694c-4025-a9d8-c74e8deb6e29" containerID="0a99c494c5b858a7fb3b67766e88166684a9f7b008c9f6795bef469abb5610c6" exitCode=0
Nov 24 14:46:46 crc kubenswrapper[5039]: I1124 14:46:46.783890 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xzdrc" event={"ID":"66e8c873-694c-4025-a9d8-c74e8deb6e29","Type":"ContainerDied","Data":"0a99c494c5b858a7fb3b67766e88166684a9f7b008c9f6795bef469abb5610c6"}
Nov 24 14:46:46 crc kubenswrapper[5039]: I1124 14:46:46.784130 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xzdrc" event={"ID":"66e8c873-694c-4025-a9d8-c74e8deb6e29","Type":"ContainerStarted","Data":"3c2731999dc83e103fa7656d1b465fe230ce760e6ff6077d88b330b978cf72b0"}
Nov 24 14:46:47 crc kubenswrapper[5039]: I1124 14:46:47.797224 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xzdrc" event={"ID":"66e8c873-694c-4025-a9d8-c74e8deb6e29","Type":"ContainerStarted","Data":"258cc81e71b23f914a263dd12ea0c2fb060d8de2f4b277d0b2385da7317c77f8"}
Nov 24 14:46:48 crc kubenswrapper[5039]: I1124 14:46:48.811922 5039 generic.go:334] "Generic (PLEG): container finished" podID="66e8c873-694c-4025-a9d8-c74e8deb6e29" containerID="258cc81e71b23f914a263dd12ea0c2fb060d8de2f4b277d0b2385da7317c77f8" exitCode=0
Nov 24 14:46:48 crc kubenswrapper[5039]: I1124 14:46:48.812353 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xzdrc" event={"ID":"66e8c873-694c-4025-a9d8-c74e8deb6e29","Type":"ContainerDied","Data":"258cc81e71b23f914a263dd12ea0c2fb060d8de2f4b277d0b2385da7317c77f8"}
Nov 24 14:46:49 crc kubenswrapper[5039]: I1124 14:46:49.823736 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xzdrc" event={"ID":"66e8c873-694c-4025-a9d8-c74e8deb6e29","Type":"ContainerStarted","Data":"99088dc2529cf2324cb9e3f955f75471803ce0f6bd108c2b8f722834666eba05"}
Nov 24 14:46:49 crc kubenswrapper[5039]: I1124 14:46:49.852233 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xzdrc" podStartSLOduration=2.417792073 podStartE2EDuration="4.852210975s" podCreationTimestamp="2025-11-24 14:46:45 +0000 UTC" firstStartedPulling="2025-11-24 14:46:46.78570751 +0000 UTC m=+5319.224832000" lastFinishedPulling="2025-11-24 14:46:49.220126382 +0000 UTC m=+5321.659250902" observedRunningTime="2025-11-24 14:46:49.839989376 +0000 UTC m=+5322.279113876" watchObservedRunningTime="2025-11-24 14:46:49.852210975 +0000 UTC m=+5322.291335485"
Nov 24 14:46:55 crc kubenswrapper[5039]: I1124 14:46:55.528224 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xzdrc"
Nov 24 14:46:55 crc kubenswrapper[5039]: I1124 14:46:55.529734 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xzdrc"
Nov 24 14:46:55 crc kubenswrapper[5039]: I1124 14:46:55.604210 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xzdrc"
Nov 24 14:46:55 crc kubenswrapper[5039]: I1124 14:46:55.973947 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xzdrc"
Nov 24 14:46:56 crc kubenswrapper[5039]: I1124 14:46:56.050227 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xzdrc"]
Nov 24 14:46:57 crc kubenswrapper[5039]: I1124 14:46:57.922241 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xzdrc" podUID="66e8c873-694c-4025-a9d8-c74e8deb6e29" containerName="registry-server" containerID="cri-o://99088dc2529cf2324cb9e3f955f75471803ce0f6bd108c2b8f722834666eba05" gracePeriod=2
Nov 24 14:46:58 crc kubenswrapper[5039]: I1124 14:46:58.497149 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xzdrc"
Nov 24 14:46:58 crc kubenswrapper[5039]: I1124 14:46:58.605952 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wdj2c\" (UniqueName: \"kubernetes.io/projected/66e8c873-694c-4025-a9d8-c74e8deb6e29-kube-api-access-wdj2c\") pod \"66e8c873-694c-4025-a9d8-c74e8deb6e29\" (UID: \"66e8c873-694c-4025-a9d8-c74e8deb6e29\") "
Nov 24 14:46:58 crc kubenswrapper[5039]: I1124 14:46:58.606017 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66e8c873-694c-4025-a9d8-c74e8deb6e29-catalog-content\") pod \"66e8c873-694c-4025-a9d8-c74e8deb6e29\" (UID: \"66e8c873-694c-4025-a9d8-c74e8deb6e29\") "
Nov 24 14:46:58 crc kubenswrapper[5039]: I1124 14:46:58.606043 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66e8c873-694c-4025-a9d8-c74e8deb6e29-utilities\") pod \"66e8c873-694c-4025-a9d8-c74e8deb6e29\" (UID: \"66e8c873-694c-4025-a9d8-c74e8deb6e29\") "
Nov 24 14:46:58 crc kubenswrapper[5039]: I1124 14:46:58.607400 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66e8c873-694c-4025-a9d8-c74e8deb6e29-utilities" (OuterVolumeSpecName: "utilities") pod "66e8c873-694c-4025-a9d8-c74e8deb6e29" (UID: "66e8c873-694c-4025-a9d8-c74e8deb6e29"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 14:46:58 crc kubenswrapper[5039]: I1124 14:46:58.636678 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66e8c873-694c-4025-a9d8-c74e8deb6e29-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "66e8c873-694c-4025-a9d8-c74e8deb6e29" (UID: "66e8c873-694c-4025-a9d8-c74e8deb6e29"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 14:46:58 crc kubenswrapper[5039]: I1124 14:46:58.638197 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66e8c873-694c-4025-a9d8-c74e8deb6e29-kube-api-access-wdj2c" (OuterVolumeSpecName: "kube-api-access-wdj2c") pod "66e8c873-694c-4025-a9d8-c74e8deb6e29" (UID: "66e8c873-694c-4025-a9d8-c74e8deb6e29"). InnerVolumeSpecName "kube-api-access-wdj2c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 14:46:58 crc kubenswrapper[5039]: I1124 14:46:58.708724 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wdj2c\" (UniqueName: \"kubernetes.io/projected/66e8c873-694c-4025-a9d8-c74e8deb6e29-kube-api-access-wdj2c\") on node \"crc\" DevicePath \"\""
Nov 24 14:46:58 crc kubenswrapper[5039]: I1124 14:46:58.708765 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66e8c873-694c-4025-a9d8-c74e8deb6e29-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 14:46:58 crc kubenswrapper[5039]: I1124 14:46:58.708776 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66e8c873-694c-4025-a9d8-c74e8deb6e29-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 14:46:58 crc kubenswrapper[5039]: I1124 14:46:58.935957 5039 generic.go:334] "Generic (PLEG): container finished" podID="66e8c873-694c-4025-a9d8-c74e8deb6e29" containerID="99088dc2529cf2324cb9e3f955f75471803ce0f6bd108c2b8f722834666eba05" exitCode=0
Nov 24 14:46:58 crc kubenswrapper[5039]: I1124 14:46:58.936011 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xzdrc" event={"ID":"66e8c873-694c-4025-a9d8-c74e8deb6e29","Type":"ContainerDied","Data":"99088dc2529cf2324cb9e3f955f75471803ce0f6bd108c2b8f722834666eba05"}
Nov 24 14:46:58 crc kubenswrapper[5039]: I1124 14:46:58.936041 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xzdrc" event={"ID":"66e8c873-694c-4025-a9d8-c74e8deb6e29","Type":"ContainerDied","Data":"3c2731999dc83e103fa7656d1b465fe230ce760e6ff6077d88b330b978cf72b0"}
Nov 24 14:46:58 crc kubenswrapper[5039]: I1124 14:46:58.936061 5039 scope.go:117] "RemoveContainer" containerID="99088dc2529cf2324cb9e3f955f75471803ce0f6bd108c2b8f722834666eba05"
Nov 24 14:46:58 crc kubenswrapper[5039]: I1124 14:46:58.936237 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xzdrc"
Nov 24 14:46:58 crc kubenswrapper[5039]: I1124 14:46:58.975240 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xzdrc"]
Nov 24 14:46:58 crc kubenswrapper[5039]: I1124 14:46:58.975570 5039 scope.go:117] "RemoveContainer" containerID="258cc81e71b23f914a263dd12ea0c2fb060d8de2f4b277d0b2385da7317c77f8"
Nov 24 14:46:58 crc kubenswrapper[5039]: I1124 14:46:58.985885 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xzdrc"]
Nov 24 14:46:58 crc kubenswrapper[5039]: I1124 14:46:58.997639 5039 scope.go:117] "RemoveContainer" containerID="0a99c494c5b858a7fb3b67766e88166684a9f7b008c9f6795bef469abb5610c6"
Nov 24 14:46:59 crc kubenswrapper[5039]: I1124 14:46:59.062613 5039 scope.go:117] "RemoveContainer" containerID="99088dc2529cf2324cb9e3f955f75471803ce0f6bd108c2b8f722834666eba05"
Nov 24 14:46:59 crc kubenswrapper[5039]: E1124 14:46:59.063235 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99088dc2529cf2324cb9e3f955f75471803ce0f6bd108c2b8f722834666eba05\": container with ID starting with 99088dc2529cf2324cb9e3f955f75471803ce0f6bd108c2b8f722834666eba05 not found: ID does not exist" containerID="99088dc2529cf2324cb9e3f955f75471803ce0f6bd108c2b8f722834666eba05"
Nov 24 14:46:59 crc kubenswrapper[5039]: I1124 14:46:59.063286 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99088dc2529cf2324cb9e3f955f75471803ce0f6bd108c2b8f722834666eba05"} err="failed to get container status \"99088dc2529cf2324cb9e3f955f75471803ce0f6bd108c2b8f722834666eba05\": rpc error: code = NotFound desc = could not find container \"99088dc2529cf2324cb9e3f955f75471803ce0f6bd108c2b8f722834666eba05\": container with ID starting with 99088dc2529cf2324cb9e3f955f75471803ce0f6bd108c2b8f722834666eba05 not found: ID does not exist"
Nov 24 14:46:59 crc kubenswrapper[5039]: I1124 14:46:59.063318 5039 scope.go:117] "RemoveContainer" containerID="258cc81e71b23f914a263dd12ea0c2fb060d8de2f4b277d0b2385da7317c77f8"
Nov 24 14:46:59 crc kubenswrapper[5039]: E1124 14:46:59.063783 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"258cc81e71b23f914a263dd12ea0c2fb060d8de2f4b277d0b2385da7317c77f8\": container with ID starting with 258cc81e71b23f914a263dd12ea0c2fb060d8de2f4b277d0b2385da7317c77f8 not found: ID does not exist" containerID="258cc81e71b23f914a263dd12ea0c2fb060d8de2f4b277d0b2385da7317c77f8"
Nov 24 14:46:59 crc kubenswrapper[5039]: I1124 14:46:59.063828 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"258cc81e71b23f914a263dd12ea0c2fb060d8de2f4b277d0b2385da7317c77f8"} err="failed to get container status \"258cc81e71b23f914a263dd12ea0c2fb060d8de2f4b277d0b2385da7317c77f8\": rpc error: code = NotFound desc = could not find container \"258cc81e71b23f914a263dd12ea0c2fb060d8de2f4b277d0b2385da7317c77f8\": container with ID starting with 258cc81e71b23f914a263dd12ea0c2fb060d8de2f4b277d0b2385da7317c77f8 not found: ID does not exist"
Nov 24 14:46:59 crc kubenswrapper[5039]: I1124 14:46:59.063871 5039 scope.go:117] "RemoveContainer" containerID="0a99c494c5b858a7fb3b67766e88166684a9f7b008c9f6795bef469abb5610c6"
Nov 24 14:46:59 crc kubenswrapper[5039]: E1124 14:46:59.064218 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a99c494c5b858a7fb3b67766e88166684a9f7b008c9f6795bef469abb5610c6\": container with ID starting with 0a99c494c5b858a7fb3b67766e88166684a9f7b008c9f6795bef469abb5610c6 not found: ID does not exist" containerID="0a99c494c5b858a7fb3b67766e88166684a9f7b008c9f6795bef469abb5610c6"
Nov 24 14:46:59 crc kubenswrapper[5039]: I1124 14:46:59.064256 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a99c494c5b858a7fb3b67766e88166684a9f7b008c9f6795bef469abb5610c6"} err="failed to get container status \"0a99c494c5b858a7fb3b67766e88166684a9f7b008c9f6795bef469abb5610c6\": rpc error: code = NotFound desc = could not find container \"0a99c494c5b858a7fb3b67766e88166684a9f7b008c9f6795bef469abb5610c6\": container with ID starting with 0a99c494c5b858a7fb3b67766e88166684a9f7b008c9f6795bef469abb5610c6 not found: ID does not exist"
Nov 24 14:47:00 crc kubenswrapper[5039]: I1124 14:47:00.341353 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66e8c873-694c-4025-a9d8-c74e8deb6e29" path="/var/lib/kubelet/pods/66e8c873-694c-4025-a9d8-c74e8deb6e29/volumes"
Nov 24 14:47:17 crc kubenswrapper[5039]: I1124 14:47:17.447696 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7r859"]
Nov 24 14:47:17 crc kubenswrapper[5039]: E1124 14:47:17.448837 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66e8c873-694c-4025-a9d8-c74e8deb6e29" containerName="extract-utilities"
Nov 24 14:47:17 crc kubenswrapper[5039]: I1124 14:47:17.448853 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="66e8c873-694c-4025-a9d8-c74e8deb6e29" containerName="extract-utilities"
Nov 24 14:47:17 crc kubenswrapper[5039]: E1124 14:47:17.448865 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66e8c873-694c-4025-a9d8-c74e8deb6e29" containerName="registry-server"
Nov 24 14:47:17 crc kubenswrapper[5039]: I1124 14:47:17.448879 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="66e8c873-694c-4025-a9d8-c74e8deb6e29" containerName="registry-server"
Nov 24 14:47:17 crc kubenswrapper[5039]: E1124 14:47:17.448940 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66e8c873-694c-4025-a9d8-c74e8deb6e29" containerName="extract-content"
Nov 24 14:47:17 crc kubenswrapper[5039]: I1124 14:47:17.448952 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="66e8c873-694c-4025-a9d8-c74e8deb6e29" containerName="extract-content"
Nov 24 14:47:17 crc kubenswrapper[5039]: I1124 14:47:17.449259 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="66e8c873-694c-4025-a9d8-c74e8deb6e29" containerName="registry-server"
Nov 24 14:47:17 crc kubenswrapper[5039]: I1124 14:47:17.451434 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7r859"]
Nov 24 14:47:17 crc kubenswrapper[5039]: I1124 14:47:17.451680 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7r859"
Nov 24 14:47:17 crc kubenswrapper[5039]: I1124 14:47:17.460953 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/932a7990-788c-4892-a7d0-8cedaa6f3455-catalog-content\") pod \"certified-operators-7r859\" (UID: \"932a7990-788c-4892-a7d0-8cedaa6f3455\") " pod="openshift-marketplace/certified-operators-7r859"
Nov 24 14:47:17 crc kubenswrapper[5039]: I1124 14:47:17.461099 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-255tz\" (UniqueName: \"kubernetes.io/projected/932a7990-788c-4892-a7d0-8cedaa6f3455-kube-api-access-255tz\") pod \"certified-operators-7r859\" (UID: \"932a7990-788c-4892-a7d0-8cedaa6f3455\") " pod="openshift-marketplace/certified-operators-7r859"
Nov 24 14:47:17 crc kubenswrapper[5039]: I1124 14:47:17.461569 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/932a7990-788c-4892-a7d0-8cedaa6f3455-utilities\") pod \"certified-operators-7r859\" (UID: \"932a7990-788c-4892-a7d0-8cedaa6f3455\") " pod="openshift-marketplace/certified-operators-7r859"
Nov 24 14:47:17 crc kubenswrapper[5039]: I1124 14:47:17.564104 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/932a7990-788c-4892-a7d0-8cedaa6f3455-utilities\") pod \"certified-operators-7r859\" (UID: \"932a7990-788c-4892-a7d0-8cedaa6f3455\") " pod="openshift-marketplace/certified-operators-7r859"
Nov 24 14:47:17 crc kubenswrapper[5039]: I1124 14:47:17.564568 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/932a7990-788c-4892-a7d0-8cedaa6f3455-catalog-content\") pod \"certified-operators-7r859\" (UID: \"932a7990-788c-4892-a7d0-8cedaa6f3455\") " pod="openshift-marketplace/certified-operators-7r859"
Nov 24 14:47:17 crc kubenswrapper[5039]: I1124 14:47:17.564608 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-255tz\" (UniqueName: \"kubernetes.io/projected/932a7990-788c-4892-a7d0-8cedaa6f3455-kube-api-access-255tz\") pod \"certified-operators-7r859\" (UID: \"932a7990-788c-4892-a7d0-8cedaa6f3455\") " pod="openshift-marketplace/certified-operators-7r859"
Nov 24 14:47:17 crc kubenswrapper[5039]: I1124 14:47:17.564647 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/932a7990-788c-4892-a7d0-8cedaa6f3455-utilities\") pod \"certified-operators-7r859\" (UID: \"932a7990-788c-4892-a7d0-8cedaa6f3455\") " pod="openshift-marketplace/certified-operators-7r859"
Nov 24 14:47:17 crc kubenswrapper[5039]: I1124 14:47:17.565032 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/932a7990-788c-4892-a7d0-8cedaa6f3455-catalog-content\") pod \"certified-operators-7r859\" (UID: \"932a7990-788c-4892-a7d0-8cedaa6f3455\") " pod="openshift-marketplace/certified-operators-7r859"
Nov 24 14:47:17 crc kubenswrapper[5039]: I1124 14:47:17.586758 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-255tz\" (UniqueName: \"kubernetes.io/projected/932a7990-788c-4892-a7d0-8cedaa6f3455-kube-api-access-255tz\") pod \"certified-operators-7r859\" (UID: \"932a7990-788c-4892-a7d0-8cedaa6f3455\") " pod="openshift-marketplace/certified-operators-7r859"
Nov 24 14:47:17 crc kubenswrapper[5039]: I1124 14:47:17.781051 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7r859"
Nov 24 14:47:18 crc kubenswrapper[5039]: I1124 14:47:18.479844 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7r859"]
Nov 24 14:47:19 crc kubenswrapper[5039]: I1124 14:47:19.253708 5039 generic.go:334] "Generic (PLEG): container finished" podID="932a7990-788c-4892-a7d0-8cedaa6f3455" containerID="ad38856c88286668d1667ec91155d91f565515a5ccc46e00b172c10ace24ce16" exitCode=0
Nov 24 14:47:19 crc kubenswrapper[5039]: I1124 14:47:19.254007 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7r859" event={"ID":"932a7990-788c-4892-a7d0-8cedaa6f3455","Type":"ContainerDied","Data":"ad38856c88286668d1667ec91155d91f565515a5ccc46e00b172c10ace24ce16"}
Nov 24 14:47:19 crc kubenswrapper[5039]: I1124 14:47:19.254030 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7r859" event={"ID":"932a7990-788c-4892-a7d0-8cedaa6f3455","Type":"ContainerStarted","Data":"8edada0084c29ac0f6215d083a1fbb62609bbc56236c8d393ba8f1a243c7add0"}
Nov 24 14:47:19 crc kubenswrapper[5039]: I1124 14:47:19.255818 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 24 14:47:20 crc kubenswrapper[5039]: I1124 14:47:20.266022 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7r859" event={"ID":"932a7990-788c-4892-a7d0-8cedaa6f3455","Type":"ContainerStarted","Data":"00f8e0166fd342bafa60c772f6bcd04c10983bd17cd40860dd4ccef388310765"}
Nov 24 14:47:21 crc kubenswrapper[5039]: I1124 14:47:21.279950 5039 generic.go:334] "Generic (PLEG): container finished" podID="932a7990-788c-4892-a7d0-8cedaa6f3455" containerID="00f8e0166fd342bafa60c772f6bcd04c10983bd17cd40860dd4ccef388310765" exitCode=0
Nov 24 14:47:21 crc kubenswrapper[5039]: I1124 14:47:21.280016 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7r859" event={"ID":"932a7990-788c-4892-a7d0-8cedaa6f3455","Type":"ContainerDied","Data":"00f8e0166fd342bafa60c772f6bcd04c10983bd17cd40860dd4ccef388310765"}
Nov 24 14:47:22 crc kubenswrapper[5039]: I1124 14:47:22.297313 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7r859" event={"ID":"932a7990-788c-4892-a7d0-8cedaa6f3455","Type":"ContainerStarted","Data":"2f8f5a9ed60a950bddbbec18b8a922512e8b1e290ab0aea8717213b5eb56d468"}
Nov 24 14:47:22 crc kubenswrapper[5039]: I1124 14:47:22.331322 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7r859" podStartSLOduration=2.8897223 podStartE2EDuration="5.331301318s" podCreationTimestamp="2025-11-24 14:47:17 +0000 UTC" firstStartedPulling="2025-11-24 14:47:19.255632098 +0000 UTC m=+5351.694756598" lastFinishedPulling="2025-11-24 14:47:21.697211116 +0000 UTC m=+5354.136335616" observedRunningTime="2025-11-24 14:47:22.316124247 +0000 UTC m=+5354.755248767" watchObservedRunningTime="2025-11-24 14:47:22.331301318 +0000 UTC m=+5354.770425818"
Nov 24 14:47:27 crc kubenswrapper[5039]: I1124 14:47:27.781259 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7r859"
Nov 24 14:47:27 crc kubenswrapper[5039]: I1124 14:47:27.781839 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7r859"
Nov 24 14:47:27 crc kubenswrapper[5039]: I1124 14:47:27.877362 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7r859"
Nov 24 14:47:28 crc kubenswrapper[5039]: I1124 14:47:28.466720 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7r859"
Nov 24 14:47:28 crc kubenswrapper[5039]: I1124 14:47:28.528170 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7r859"]
Nov 24 14:47:30 crc kubenswrapper[5039]: I1124 14:47:30.422599 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7r859" podUID="932a7990-788c-4892-a7d0-8cedaa6f3455" containerName="registry-server" containerID="cri-o://2f8f5a9ed60a950bddbbec18b8a922512e8b1e290ab0aea8717213b5eb56d468" gracePeriod=2
Nov 24 14:47:31 crc kubenswrapper[5039]: I1124 14:47:31.439006 5039 generic.go:334] "Generic (PLEG): container finished" podID="932a7990-788c-4892-a7d0-8cedaa6f3455" containerID="2f8f5a9ed60a950bddbbec18b8a922512e8b1e290ab0aea8717213b5eb56d468" exitCode=0
Nov 24 14:47:31 crc kubenswrapper[5039]: I1124 14:47:31.439097 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7r859" event={"ID":"932a7990-788c-4892-a7d0-8cedaa6f3455","Type":"ContainerDied","Data":"2f8f5a9ed60a950bddbbec18b8a922512e8b1e290ab0aea8717213b5eb56d468"}
Nov 24 14:47:31 crc kubenswrapper[5039]: I1124 14:47:31.919563 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7r859"
Nov 24 14:47:31 crc kubenswrapper[5039]: I1124 14:47:31.996132 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-255tz\" (UniqueName: \"kubernetes.io/projected/932a7990-788c-4892-a7d0-8cedaa6f3455-kube-api-access-255tz\") pod \"932a7990-788c-4892-a7d0-8cedaa6f3455\" (UID: \"932a7990-788c-4892-a7d0-8cedaa6f3455\") "
Nov 24 14:47:31 crc kubenswrapper[5039]: I1124 14:47:31.996246 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/932a7990-788c-4892-a7d0-8cedaa6f3455-utilities\") pod \"932a7990-788c-4892-a7d0-8cedaa6f3455\" (UID: \"932a7990-788c-4892-a7d0-8cedaa6f3455\") "
Nov 24 14:47:31 crc kubenswrapper[5039]: I1124 14:47:31.996535 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/932a7990-788c-4892-a7d0-8cedaa6f3455-catalog-content\") pod \"932a7990-788c-4892-a7d0-8cedaa6f3455\" (UID: \"932a7990-788c-4892-a7d0-8cedaa6f3455\") "
Nov 24 14:47:31 crc kubenswrapper[5039]: I1124 14:47:31.997844 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/932a7990-788c-4892-a7d0-8cedaa6f3455-utilities" (OuterVolumeSpecName: "utilities") pod "932a7990-788c-4892-a7d0-8cedaa6f3455" (UID: "932a7990-788c-4892-a7d0-8cedaa6f3455"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 14:47:32 crc kubenswrapper[5039]: I1124 14:47:32.004422 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/932a7990-788c-4892-a7d0-8cedaa6f3455-kube-api-access-255tz" (OuterVolumeSpecName: "kube-api-access-255tz") pod "932a7990-788c-4892-a7d0-8cedaa6f3455" (UID: "932a7990-788c-4892-a7d0-8cedaa6f3455"). InnerVolumeSpecName "kube-api-access-255tz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 14:47:32 crc kubenswrapper[5039]: I1124 14:47:32.043009 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/932a7990-788c-4892-a7d0-8cedaa6f3455-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "932a7990-788c-4892-a7d0-8cedaa6f3455" (UID: "932a7990-788c-4892-a7d0-8cedaa6f3455"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 14:47:32 crc kubenswrapper[5039]: I1124 14:47:32.099402 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-255tz\" (UniqueName: \"kubernetes.io/projected/932a7990-788c-4892-a7d0-8cedaa6f3455-kube-api-access-255tz\") on node \"crc\" DevicePath \"\""
Nov 24 14:47:32 crc kubenswrapper[5039]: I1124 14:47:32.099448 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/932a7990-788c-4892-a7d0-8cedaa6f3455-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 14:47:32 crc kubenswrapper[5039]: I1124 14:47:32.099460 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/932a7990-788c-4892-a7d0-8cedaa6f3455-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 14:47:32 crc kubenswrapper[5039]: I1124 14:47:32.458700 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7r859" event={"ID":"932a7990-788c-4892-a7d0-8cedaa6f3455","Type":"ContainerDied","Data":"8edada0084c29ac0f6215d083a1fbb62609bbc56236c8d393ba8f1a243c7add0"}
Nov 24 14:47:32 crc kubenswrapper[5039]: I1124 14:47:32.458769 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7r859"
Nov 24 14:47:32 crc kubenswrapper[5039]: I1124 14:47:32.459157 5039 scope.go:117] "RemoveContainer" containerID="2f8f5a9ed60a950bddbbec18b8a922512e8b1e290ab0aea8717213b5eb56d468"
Nov 24 14:47:32 crc kubenswrapper[5039]: I1124 14:47:32.500580 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7r859"]
Nov 24 14:47:32 crc kubenswrapper[5039]: I1124 14:47:32.507560 5039 scope.go:117] "RemoveContainer" containerID="00f8e0166fd342bafa60c772f6bcd04c10983bd17cd40860dd4ccef388310765"
Nov 24 14:47:32 crc kubenswrapper[5039]: I1124 14:47:32.514454 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7r859"]
Nov 24 14:47:32 crc kubenswrapper[5039]: I1124 14:47:32.790581 5039 scope.go:117] "RemoveContainer" containerID="ad38856c88286668d1667ec91155d91f565515a5ccc46e00b172c10ace24ce16"
Nov 24 14:47:34 crc kubenswrapper[5039]: I1124 14:47:34.324343 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="932a7990-788c-4892-a7d0-8cedaa6f3455" path="/var/lib/kubelet/pods/932a7990-788c-4892-a7d0-8cedaa6f3455/volumes"
Nov 24 14:48:20 crc kubenswrapper[5039]: I1124 14:48:20.101784 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 14:48:20 crc kubenswrapper[5039]: I1124 14:48:20.102640 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 14:48:50 crc kubenswrapper[5039]: I1124 14:48:50.101979 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 14:48:50 crc kubenswrapper[5039]: I1124 14:48:50.102669 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 14:49:20 crc kubenswrapper[5039]: I1124 14:49:20.101589 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 14:49:20 crc kubenswrapper[5039]: I1124 14:49:20.102293 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 14:49:20 crc kubenswrapper[5039]: I1124 14:49:20.102365 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg"
Nov 24 14:49:20 crc kubenswrapper[5039]: I1124 14:49:20.103646 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"588ec00ff0baad475dbb7f43efd2ba6987e01bf02046b7c136c3ad7b91310933"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 24 14:49:20 crc kubenswrapper[5039]: I1124 14:49:20.103750 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://588ec00ff0baad475dbb7f43efd2ba6987e01bf02046b7c136c3ad7b91310933" gracePeriod=600
Nov 24 14:49:20 crc kubenswrapper[5039]: I1124 14:49:20.810410 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="588ec00ff0baad475dbb7f43efd2ba6987e01bf02046b7c136c3ad7b91310933" exitCode=0
Nov 24 14:49:20 crc kubenswrapper[5039]: I1124 14:49:20.810552 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"588ec00ff0baad475dbb7f43efd2ba6987e01bf02046b7c136c3ad7b91310933"}
Nov 24 14:49:20 crc kubenswrapper[5039]: I1124 14:49:20.810929 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7"}
Nov 24 14:49:20 crc kubenswrapper[5039]: I1124 14:49:20.810959 5039 scope.go:117] "RemoveContainer" containerID="872016ba27c648eeaccfdbe529ecc788e0311c47a962839ec045434095bbf8aa"
Nov 24 14:49:54 crc kubenswrapper[5039]: E1124 14:49:54.810299 5039 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.175:49092->38.102.83.175:41425: write tcp 38.102.83.175:49092->38.102.83.175:41425: write: broken pipe
Nov 24 14:50:48 crc kubenswrapper[5039]: E1124 14:50:48.013756 5039 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.175:34834->38.102.83.175:41425: write tcp 38.102.83.175:34834->38.102.83.175:41425: write: broken pipe
Nov 24 14:51:20 crc kubenswrapper[5039]: I1124 14:51:20.101625 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 14:51:20 crc kubenswrapper[5039]: I1124 14:51:20.102146 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 14:51:50 crc kubenswrapper[5039]: I1124 14:51:50.101255 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 14:51:50 crc kubenswrapper[5039]: I1124 14:51:50.102012 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 14:51:56 crc kubenswrapper[5039]: I1124 14:51:56.023551 5039 scope.go:117] "RemoveContainer" containerID="f0b928053b71a95f2d078cba4809244dfd22b14405da949d50d2be5d3b805a8b"
Nov 24 14:51:56 crc kubenswrapper[5039]: I1124 14:51:56.059351 5039 scope.go:117] "RemoveContainer" containerID="8b1970035c1096cecdc06ec6c90d637561dff5e9bfeebdf72ecd4115a4b3a214"
Nov 24 14:51:56 crc kubenswrapper[5039]: I1124 14:51:56.092654 5039 scope.go:117] "RemoveContainer" containerID="e5a832c19c19002819bb04fe4161e19cb48eb40a7350d3d682f36ccec6111f6b"
Nov 24 14:52:20 crc kubenswrapper[5039]: I1124 14:52:20.101833 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 14:52:20 crc kubenswrapper[5039]: I1124 14:52:20.102571 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 14:52:20 crc kubenswrapper[5039]: I1124 14:52:20.102632 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg"
Nov 24 14:52:20 crc kubenswrapper[5039]: I1124 14:52:20.103405 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 24 14:52:20 crc kubenswrapper[5039]: I1124 14:52:20.103455 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7" gracePeriod=600
Nov 24 14:52:20 crc kubenswrapper[5039]: E1124 14:52:20.226526 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:52:20 crc kubenswrapper[5039]: I1124 14:52:20.458454 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7" exitCode=0
Nov 24 14:52:20 crc kubenswrapper[5039]: I1124 14:52:20.458521 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7"}
Nov 24 14:52:20 crc kubenswrapper[5039]: I1124 14:52:20.458562 5039 scope.go:117] "RemoveContainer" containerID="588ec00ff0baad475dbb7f43efd2ba6987e01bf02046b7c136c3ad7b91310933"
Nov 24 14:52:20 crc kubenswrapper[5039]: I1124 14:52:20.459272 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7"
Nov 24 14:52:20 crc kubenswrapper[5039]: E1124 14:52:20.459628 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:52:31 crc kubenswrapper[5039]: I1124 14:52:31.310540 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7"
Nov 24 14:52:31 crc kubenswrapper[5039]: E1124 14:52:31.325023 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:52:46 crc kubenswrapper[5039]: I1124 14:52:46.306575 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7"
Nov 24 14:52:46 crc kubenswrapper[5039]: E1124 14:52:46.307752 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:52:57 crc kubenswrapper[5039]: I1124 14:52:57.307115 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7"
Nov 24 14:52:57 crc kubenswrapper[5039]: E1124 14:52:57.308075 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:53:11 crc kubenswrapper[5039]: I1124 14:53:11.308615 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7"
Nov 24 14:53:11 crc kubenswrapper[5039]: E1124 14:53:11.309608 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:53:24 crc kubenswrapper[5039]: I1124 14:53:24.307599 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7"
Nov 24 14:53:24 crc kubenswrapper[5039]: E1124 14:53:24.308795 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:53:39 crc kubenswrapper[5039]: I1124 14:53:39.307212 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7"
Nov 24 14:53:39 crc kubenswrapper[5039]: E1124 14:53:39.308317 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 14:53:46 crc kubenswrapper[5039]: I1124 14:53:46.907824 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vlm2w"]
Nov 24 14:53:46 crc kubenswrapper[5039]: E1124 14:53:46.909220 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="932a7990-788c-4892-a7d0-8cedaa6f3455" containerName="registry-server"
Nov 24 14:53:46 crc kubenswrapper[5039]: I1124 14:53:46.909327 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="932a7990-788c-4892-a7d0-8cedaa6f3455" containerName="registry-server"
Nov 24 14:53:46 crc kubenswrapper[5039]: E1124 14:53:46.909350 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="932a7990-788c-4892-a7d0-8cedaa6f3455" containerName="extract-utilities"
Nov 24 14:53:46 crc kubenswrapper[5039]: I1124 14:53:46.909358 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="932a7990-788c-4892-a7d0-8cedaa6f3455" containerName="extract-utilities"
Nov 24 14:53:46 crc kubenswrapper[5039]: E1124 14:53:46.909404 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="932a7990-788c-4892-a7d0-8cedaa6f3455" containerName="extract-content"
Nov 24 14:53:46 crc kubenswrapper[5039]: I1124 14:53:46.909412 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="932a7990-788c-4892-a7d0-8cedaa6f3455" containerName="extract-content"
Nov 24 14:53:46 crc kubenswrapper[5039]: I1124 14:53:46.909773 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="932a7990-788c-4892-a7d0-8cedaa6f3455" containerName="registry-server"
Nov 24 14:53:46 crc kubenswrapper[5039]: I1124 14:53:46.912427 5039 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/redhat-operators-vlm2w" Nov 24 14:53:46 crc kubenswrapper[5039]: I1124 14:53:46.917323 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vlm2w"] Nov 24 14:53:46 crc kubenswrapper[5039]: I1124 14:53:46.966525 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8-utilities\") pod \"redhat-operators-vlm2w\" (UID: \"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8\") " pod="openshift-marketplace/redhat-operators-vlm2w" Nov 24 14:53:46 crc kubenswrapper[5039]: I1124 14:53:46.966723 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8-catalog-content\") pod \"redhat-operators-vlm2w\" (UID: \"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8\") " pod="openshift-marketplace/redhat-operators-vlm2w" Nov 24 14:53:46 crc kubenswrapper[5039]: I1124 14:53:46.966824 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjbbw\" (UniqueName: \"kubernetes.io/projected/7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8-kube-api-access-bjbbw\") pod \"redhat-operators-vlm2w\" (UID: \"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8\") " pod="openshift-marketplace/redhat-operators-vlm2w" Nov 24 14:53:47 crc kubenswrapper[5039]: I1124 14:53:47.069420 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjbbw\" (UniqueName: \"kubernetes.io/projected/7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8-kube-api-access-bjbbw\") pod \"redhat-operators-vlm2w\" (UID: \"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8\") " pod="openshift-marketplace/redhat-operators-vlm2w" Nov 24 14:53:47 crc kubenswrapper[5039]: I1124 14:53:47.069573 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8-utilities\") pod \"redhat-operators-vlm2w\" (UID: \"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8\") " pod="openshift-marketplace/redhat-operators-vlm2w" Nov 24 14:53:47 crc kubenswrapper[5039]: I1124 14:53:47.069715 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8-catalog-content\") pod \"redhat-operators-vlm2w\" (UID: \"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8\") " pod="openshift-marketplace/redhat-operators-vlm2w" Nov 24 14:53:47 crc kubenswrapper[5039]: I1124 14:53:47.070300 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8-utilities\") pod \"redhat-operators-vlm2w\" (UID: \"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8\") " pod="openshift-marketplace/redhat-operators-vlm2w" Nov 24 14:53:47 crc kubenswrapper[5039]: I1124 14:53:47.070316 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8-catalog-content\") pod \"redhat-operators-vlm2w\" (UID: \"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8\") " pod="openshift-marketplace/redhat-operators-vlm2w" Nov 24 14:53:47 crc kubenswrapper[5039]: I1124 14:53:47.478258 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-bjbbw\" (UniqueName: \"kubernetes.io/projected/7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8-kube-api-access-bjbbw\") pod \"redhat-operators-vlm2w\" (UID: \"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8\") " pod="openshift-marketplace/redhat-operators-vlm2w" Nov 24 14:53:47 crc kubenswrapper[5039]: I1124 14:53:47.547082 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vlm2w" Nov 24 14:53:48 crc kubenswrapper[5039]: I1124 14:53:48.248947 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vlm2w"] Nov 24 14:53:48 crc kubenswrapper[5039]: I1124 14:53:48.497101 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vlm2w" event={"ID":"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8","Type":"ContainerStarted","Data":"cbed03e3c28687c9086c97c946c8f80cb586afae09d633a029b5ef005803b2fe"} Nov 24 14:53:49 crc kubenswrapper[5039]: I1124 14:53:49.509341 5039 generic.go:334] "Generic (PLEG): container finished" podID="7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8" containerID="0ef9dfa5739bfe201bd860a9beaf0778784a8e7007bf7fe813614373e4780052" exitCode=0 Nov 24 14:53:49 crc kubenswrapper[5039]: I1124 14:53:49.509400 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vlm2w" event={"ID":"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8","Type":"ContainerDied","Data":"0ef9dfa5739bfe201bd860a9beaf0778784a8e7007bf7fe813614373e4780052"} Nov 24 14:53:49 crc kubenswrapper[5039]: I1124 14:53:49.511969 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 14:53:50 crc kubenswrapper[5039]: I1124 14:53:50.528178 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vlm2w" event={"ID":"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8","Type":"ContainerStarted","Data":"9ebb660608f50d3b9a19d9f8d6122d02371e3359049de5b5ccf9a1fd32635ba6"} Nov 24 14:53:53 crc kubenswrapper[5039]: I1124 14:53:53.307051 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7" Nov 24 14:53:53 crc kubenswrapper[5039]: E1124 14:53:53.307776 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:53:54 crc kubenswrapper[5039]: I1124 14:53:54.581223 5039 generic.go:334] "Generic (PLEG): container finished" podID="7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8" containerID="9ebb660608f50d3b9a19d9f8d6122d02371e3359049de5b5ccf9a1fd32635ba6" exitCode=0 Nov 24 14:53:54 crc kubenswrapper[5039]: I1124 14:53:54.581318 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vlm2w" event={"ID":"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8","Type":"ContainerDied","Data":"9ebb660608f50d3b9a19d9f8d6122d02371e3359049de5b5ccf9a1fd32635ba6"} Nov 24 14:53:55 crc kubenswrapper[5039]: I1124 14:53:55.593553 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vlm2w" 
event={"ID":"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8","Type":"ContainerStarted","Data":"1a5dc646ba3060c3dd4b7ed4ad2b649da438b77f3639c3a7620773a34e1b4b06"} Nov 24 14:53:55 crc kubenswrapper[5039]: I1124 14:53:55.629592 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vlm2w" podStartSLOduration=4.149402068 podStartE2EDuration="9.629493788s" podCreationTimestamp="2025-11-24 14:53:46 +0000 UTC" firstStartedPulling="2025-11-24 14:53:49.511631766 +0000 UTC m=+5741.950756286" lastFinishedPulling="2025-11-24 14:53:54.991723466 +0000 UTC m=+5747.430848006" observedRunningTime="2025-11-24 14:53:55.624385152 +0000 UTC m=+5748.063509662" watchObservedRunningTime="2025-11-24 14:53:55.629493788 +0000 UTC m=+5748.068618298" Nov 24 14:53:57 crc kubenswrapper[5039]: I1124 14:53:57.547349 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vlm2w" Nov 24 14:53:57 crc kubenswrapper[5039]: I1124 14:53:57.549214 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vlm2w" Nov 24 14:53:58 crc kubenswrapper[5039]: I1124 14:53:58.618161 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vlm2w" podUID="7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8" containerName="registry-server" probeResult="failure" output=< Nov 24 14:53:58 crc kubenswrapper[5039]: timeout: failed to connect service ":50051" within 1s Nov 24 14:53:58 crc kubenswrapper[5039]: > Nov 24 14:54:06 crc kubenswrapper[5039]: I1124 14:54:06.307098 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7" Nov 24 14:54:06 crc kubenswrapper[5039]: E1124 14:54:06.310039 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:54:07 crc kubenswrapper[5039]: I1124 14:54:07.594744 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vlm2w" Nov 24 14:54:07 crc kubenswrapper[5039]: I1124 14:54:07.653802 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vlm2w" Nov 24 14:54:07 crc kubenswrapper[5039]: I1124 14:54:07.833771 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vlm2w"] Nov 24 14:54:08 crc kubenswrapper[5039]: I1124 14:54:08.747225 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vlm2w" podUID="7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8" containerName="registry-server" containerID="cri-o://1a5dc646ba3060c3dd4b7ed4ad2b649da438b77f3639c3a7620773a34e1b4b06" gracePeriod=2 Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.300701 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vlm2w" Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.428666 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8-utilities\") pod \"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8\" (UID: \"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8\") " Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.428925 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bjbbw\" (UniqueName: \"kubernetes.io/projected/7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8-kube-api-access-bjbbw\") pod \"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8\" (UID: \"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8\") " Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.428970 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8-catalog-content\") pod \"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8\" (UID: \"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8\") " Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.430393 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8-utilities" (OuterVolumeSpecName: "utilities") pod "7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8" (UID: "7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.437240 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8-kube-api-access-bjbbw" (OuterVolumeSpecName: "kube-api-access-bjbbw") pod "7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8" (UID: "7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8"). InnerVolumeSpecName "kube-api-access-bjbbw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.528060 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8" (UID: "7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.531442 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bjbbw\" (UniqueName: \"kubernetes.io/projected/7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8-kube-api-access-bjbbw\") on node \"crc\" DevicePath \"\"" Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.531485 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.531495 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.759362 5039 generic.go:334] "Generic (PLEG): container finished" podID="7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8" containerID="1a5dc646ba3060c3dd4b7ed4ad2b649da438b77f3639c3a7620773a34e1b4b06" exitCode=0 Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.759405 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vlm2w" event={"ID":"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8","Type":"ContainerDied","Data":"1a5dc646ba3060c3dd4b7ed4ad2b649da438b77f3639c3a7620773a34e1b4b06"} Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.759430 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vlm2w" event={"ID":"7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8","Type":"ContainerDied","Data":"cbed03e3c28687c9086c97c946c8f80cb586afae09d633a029b5ef005803b2fe"} Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.759446 5039 scope.go:117] "RemoveContainer" containerID="1a5dc646ba3060c3dd4b7ed4ad2b649da438b77f3639c3a7620773a34e1b4b06" Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.759583 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vlm2w" Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.794212 5039 scope.go:117] "RemoveContainer" containerID="9ebb660608f50d3b9a19d9f8d6122d02371e3359049de5b5ccf9a1fd32635ba6" Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.808753 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vlm2w"] Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.819801 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vlm2w"] Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.833742 5039 scope.go:117] "RemoveContainer" containerID="0ef9dfa5739bfe201bd860a9beaf0778784a8e7007bf7fe813614373e4780052" Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.893059 5039 scope.go:117] "RemoveContainer" containerID="1a5dc646ba3060c3dd4b7ed4ad2b649da438b77f3639c3a7620773a34e1b4b06" Nov 24 14:54:09 crc kubenswrapper[5039]: E1124 14:54:09.893725 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a5dc646ba3060c3dd4b7ed4ad2b649da438b77f3639c3a7620773a34e1b4b06\": container with ID starting with 1a5dc646ba3060c3dd4b7ed4ad2b649da438b77f3639c3a7620773a34e1b4b06 not found: ID does not exist" containerID="1a5dc646ba3060c3dd4b7ed4ad2b649da438b77f3639c3a7620773a34e1b4b06" Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.893775 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a5dc646ba3060c3dd4b7ed4ad2b649da438b77f3639c3a7620773a34e1b4b06"} err="failed to get container status \"1a5dc646ba3060c3dd4b7ed4ad2b649da438b77f3639c3a7620773a34e1b4b06\": rpc error: code = NotFound desc = could not find container \"1a5dc646ba3060c3dd4b7ed4ad2b649da438b77f3639c3a7620773a34e1b4b06\": container with ID starting with 1a5dc646ba3060c3dd4b7ed4ad2b649da438b77f3639c3a7620773a34e1b4b06 not found: ID does not exist" Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.893810 5039 scope.go:117] "RemoveContainer" containerID="9ebb660608f50d3b9a19d9f8d6122d02371e3359049de5b5ccf9a1fd32635ba6" Nov 24 14:54:09 crc kubenswrapper[5039]: E1124 14:54:09.894408 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ebb660608f50d3b9a19d9f8d6122d02371e3359049de5b5ccf9a1fd32635ba6\": container with ID starting with 9ebb660608f50d3b9a19d9f8d6122d02371e3359049de5b5ccf9a1fd32635ba6 not found: ID does not exist" containerID="9ebb660608f50d3b9a19d9f8d6122d02371e3359049de5b5ccf9a1fd32635ba6" Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.894455 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ebb660608f50d3b9a19d9f8d6122d02371e3359049de5b5ccf9a1fd32635ba6"} err="failed to get container status \"9ebb660608f50d3b9a19d9f8d6122d02371e3359049de5b5ccf9a1fd32635ba6\": rpc error: code = NotFound desc = could not find container \"9ebb660608f50d3b9a19d9f8d6122d02371e3359049de5b5ccf9a1fd32635ba6\": container with ID starting with 9ebb660608f50d3b9a19d9f8d6122d02371e3359049de5b5ccf9a1fd32635ba6 not found: ID does not exist" Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.894488 5039 scope.go:117] "RemoveContainer" containerID="0ef9dfa5739bfe201bd860a9beaf0778784a8e7007bf7fe813614373e4780052" Nov 24 14:54:09 crc kubenswrapper[5039]: E1124 14:54:09.894844 5039 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"0ef9dfa5739bfe201bd860a9beaf0778784a8e7007bf7fe813614373e4780052\": container with ID starting with 0ef9dfa5739bfe201bd860a9beaf0778784a8e7007bf7fe813614373e4780052 not found: ID does not exist" containerID="0ef9dfa5739bfe201bd860a9beaf0778784a8e7007bf7fe813614373e4780052" Nov 24 14:54:09 crc kubenswrapper[5039]: I1124 14:54:09.894894 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ef9dfa5739bfe201bd860a9beaf0778784a8e7007bf7fe813614373e4780052"} err="failed to get container status \"0ef9dfa5739bfe201bd860a9beaf0778784a8e7007bf7fe813614373e4780052\": rpc error: code = NotFound desc = could not find container \"0ef9dfa5739bfe201bd860a9beaf0778784a8e7007bf7fe813614373e4780052\": container with ID starting with 0ef9dfa5739bfe201bd860a9beaf0778784a8e7007bf7fe813614373e4780052 not found: ID does not exist" Nov 24 14:54:10 crc kubenswrapper[5039]: I1124 14:54:10.328841 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8" path="/var/lib/kubelet/pods/7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8/volumes" Nov 24 14:54:18 crc kubenswrapper[5039]: I1124 14:54:18.323574 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7" Nov 24 14:54:18 crc kubenswrapper[5039]: E1124 14:54:18.324710 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:54:29 crc kubenswrapper[5039]: I1124 14:54:29.307718 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7" Nov 24 14:54:29 crc kubenswrapper[5039]: E1124 14:54:29.308917 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:54:43 crc kubenswrapper[5039]: I1124 14:54:43.307765 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7" Nov 24 14:54:43 crc kubenswrapper[5039]: E1124 14:54:43.308909 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:54:56 crc kubenswrapper[5039]: I1124 14:54:56.307427 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7" Nov 24 14:54:56 crc kubenswrapper[5039]: E1124 14:54:56.308288 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:55:09 crc kubenswrapper[5039]: I1124 14:55:09.306629 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7" Nov 24 14:55:09 crc kubenswrapper[5039]: E1124 14:55:09.307373 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.335781 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 24 14:55:18 crc kubenswrapper[5039]: E1124 14:55:18.336938 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8" containerName="registry-server" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.336957 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8" containerName="registry-server" Nov 24 14:55:18 crc kubenswrapper[5039]: E1124 14:55:18.336974 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8" containerName="extract-utilities" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.336983 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8" containerName="extract-utilities" Nov 24 14:55:18 crc kubenswrapper[5039]: E1124 14:55:18.337012 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8" containerName="extract-content" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.337018 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8" containerName="extract-content" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.337274 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bc55fb9-0ccd-4458-8f44-5f2cb8b6cbd8" containerName="registry-server" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.338187 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.345085 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-5cncf" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.345303 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.345498 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.345624 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.360733 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.526873 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/e515a4f0-d838-4d61-906b-f26a0c07f8c8-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.527353 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e515a4f0-d838-4d61-906b-f26a0c07f8c8-config-data\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.527431 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e515a4f0-d838-4d61-906b-f26a0c07f8c8-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.527531 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e515a4f0-d838-4d61-906b-f26a0c07f8c8-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.527567 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/e515a4f0-d838-4d61-906b-f26a0c07f8c8-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.527651 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.527714 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: 
\"kubernetes.io/empty-dir/e515a4f0-d838-4d61-906b-f26a0c07f8c8-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.527772 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfmsh\" (UniqueName: \"kubernetes.io/projected/e515a4f0-d838-4d61-906b-f26a0c07f8c8-kube-api-access-cfmsh\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.527793 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e515a4f0-d838-4d61-906b-f26a0c07f8c8-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.629181 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e515a4f0-d838-4d61-906b-f26a0c07f8c8-config-data\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.629232 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e515a4f0-d838-4d61-906b-f26a0c07f8c8-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.629276 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e515a4f0-d838-4d61-906b-f26a0c07f8c8-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.629299 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/e515a4f0-d838-4d61-906b-f26a0c07f8c8-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.629342 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.629378 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/e515a4f0-d838-4d61-906b-f26a0c07f8c8-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.629407 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfmsh\" (UniqueName: 
\"kubernetes.io/projected/e515a4f0-d838-4d61-906b-f26a0c07f8c8-kube-api-access-cfmsh\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.629431 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e515a4f0-d838-4d61-906b-f26a0c07f8c8-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.629626 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/e515a4f0-d838-4d61-906b-f26a0c07f8c8-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.631391 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e515a4f0-d838-4d61-906b-f26a0c07f8c8-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.632086 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/e515a4f0-d838-4d61-906b-f26a0c07f8c8-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.632247 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e515a4f0-d838-4d61-906b-f26a0c07f8c8-config-data\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.632870 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.632992 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/e515a4f0-d838-4d61-906b-f26a0c07f8c8-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.638220 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e515a4f0-d838-4d61-906b-f26a0c07f8c8-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.638590 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/e515a4f0-d838-4d61-906b-f26a0c07f8c8-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " 
pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.640040 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e515a4f0-d838-4d61-906b-f26a0c07f8c8-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.648814 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfmsh\" (UniqueName: \"kubernetes.io/projected/e515a4f0-d838-4d61-906b-f26a0c07f8c8-kube-api-access-cfmsh\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.702612 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"tempest-tests-tempest\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") " pod="openstack/tempest-tests-tempest" Nov 24 14:55:18 crc kubenswrapper[5039]: I1124 14:55:18.964414 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 24 14:55:19 crc kubenswrapper[5039]: I1124 14:55:19.494110 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 24 14:55:19 crc kubenswrapper[5039]: I1124 14:55:19.612920 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"e515a4f0-d838-4d61-906b-f26a0c07f8c8","Type":"ContainerStarted","Data":"698fd31c4d636106bd6c5d08af1ff15643cae9bfe3f2a0f0f509788a4c67ffc6"} Nov 24 14:55:23 crc kubenswrapper[5039]: I1124 14:55:23.307142 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7" Nov 24 14:55:23 crc kubenswrapper[5039]: E1124 14:55:23.308085 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:55:38 crc kubenswrapper[5039]: I1124 14:55:38.314271 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7" Nov 24 14:55:38 crc kubenswrapper[5039]: E1124 14:55:38.315100 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:55:49 crc kubenswrapper[5039]: I1124 14:55:49.307487 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7" Nov 24 14:55:49 crc kubenswrapper[5039]: E1124 14:55:49.308267 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:55:51 crc kubenswrapper[5039]: E1124 14:55:51.669092 5039 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Nov 24 14:55:51 crc kubenswrapper[5039]: E1124 14:55:51.672910 5039 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cfmsh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]Volu
meDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(e515a4f0-d838-4d61-906b-f26a0c07f8c8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 14:55:51 crc kubenswrapper[5039]: E1124 14:55:51.674120 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" podUID="e515a4f0-d838-4d61-906b-f26a0c07f8c8" Nov 24 14:55:51 crc kubenswrapper[5039]: E1124 14:55:51.997399 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="e515a4f0-d838-4d61-906b-f26a0c07f8c8" Nov 24 14:55:52 crc kubenswrapper[5039]: I1124 14:55:52.909895 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-dlptg"] Nov 24 14:55:52 crc kubenswrapper[5039]: I1124 14:55:52.912189 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dlptg" Nov 24 14:55:52 crc kubenswrapper[5039]: I1124 14:55:52.923989 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dlptg"] Nov 24 14:55:53 crc kubenswrapper[5039]: I1124 14:55:53.027634 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/592272ed-6a8c-42d9-8c87-b62ba335267c-utilities\") pod \"community-operators-dlptg\" (UID: \"592272ed-6a8c-42d9-8c87-b62ba335267c\") " pod="openshift-marketplace/community-operators-dlptg" Nov 24 14:55:53 crc kubenswrapper[5039]: I1124 14:55:53.028017 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jtcc\" (UniqueName: \"kubernetes.io/projected/592272ed-6a8c-42d9-8c87-b62ba335267c-kube-api-access-8jtcc\") pod \"community-operators-dlptg\" (UID: \"592272ed-6a8c-42d9-8c87-b62ba335267c\") " pod="openshift-marketplace/community-operators-dlptg" Nov 24 14:55:53 crc kubenswrapper[5039]: I1124 14:55:53.028113 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/592272ed-6a8c-42d9-8c87-b62ba335267c-catalog-content\") pod \"community-operators-dlptg\" (UID: \"592272ed-6a8c-42d9-8c87-b62ba335267c\") " pod="openshift-marketplace/community-operators-dlptg" Nov 24 14:55:53 crc kubenswrapper[5039]: I1124 14:55:53.131103 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/592272ed-6a8c-42d9-8c87-b62ba335267c-utilities\") pod \"community-operators-dlptg\" (UID: \"592272ed-6a8c-42d9-8c87-b62ba335267c\") " pod="openshift-marketplace/community-operators-dlptg" Nov 24 14:55:53 crc kubenswrapper[5039]: I1124 14:55:53.131380 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jtcc\" (UniqueName: \"kubernetes.io/projected/592272ed-6a8c-42d9-8c87-b62ba335267c-kube-api-access-8jtcc\") pod \"community-operators-dlptg\" (UID: 
\"592272ed-6a8c-42d9-8c87-b62ba335267c\") " pod="openshift-marketplace/community-operators-dlptg" Nov 24 14:55:53 crc kubenswrapper[5039]: I1124 14:55:53.131432 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/592272ed-6a8c-42d9-8c87-b62ba335267c-catalog-content\") pod \"community-operators-dlptg\" (UID: \"592272ed-6a8c-42d9-8c87-b62ba335267c\") " pod="openshift-marketplace/community-operators-dlptg" Nov 24 14:55:53 crc kubenswrapper[5039]: I1124 14:55:53.132172 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/592272ed-6a8c-42d9-8c87-b62ba335267c-utilities\") pod \"community-operators-dlptg\" (UID: \"592272ed-6a8c-42d9-8c87-b62ba335267c\") " pod="openshift-marketplace/community-operators-dlptg" Nov 24 14:55:53 crc kubenswrapper[5039]: I1124 14:55:53.132488 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/592272ed-6a8c-42d9-8c87-b62ba335267c-catalog-content\") pod \"community-operators-dlptg\" (UID: \"592272ed-6a8c-42d9-8c87-b62ba335267c\") " pod="openshift-marketplace/community-operators-dlptg" Nov 24 14:55:53 crc kubenswrapper[5039]: I1124 14:55:53.153210 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jtcc\" (UniqueName: \"kubernetes.io/projected/592272ed-6a8c-42d9-8c87-b62ba335267c-kube-api-access-8jtcc\") pod \"community-operators-dlptg\" (UID: \"592272ed-6a8c-42d9-8c87-b62ba335267c\") " pod="openshift-marketplace/community-operators-dlptg" Nov 24 14:55:53 crc kubenswrapper[5039]: I1124 14:55:53.265336 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dlptg" Nov 24 14:55:53 crc kubenswrapper[5039]: I1124 14:55:53.864159 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dlptg"] Nov 24 14:55:54 crc kubenswrapper[5039]: I1124 14:55:54.020459 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dlptg" event={"ID":"592272ed-6a8c-42d9-8c87-b62ba335267c","Type":"ContainerStarted","Data":"f1a38939311b3d9897d206425645d32d3c38ce78804c39c5606217ec89ffafac"} Nov 24 14:55:55 crc kubenswrapper[5039]: I1124 14:55:55.037789 5039 generic.go:334] "Generic (PLEG): container finished" podID="592272ed-6a8c-42d9-8c87-b62ba335267c" containerID="301057e4df8cc1ed68c015ef5301474448424247e85c70923191446a1092d468" exitCode=0 Nov 24 14:55:55 crc kubenswrapper[5039]: I1124 14:55:55.037861 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dlptg" event={"ID":"592272ed-6a8c-42d9-8c87-b62ba335267c","Type":"ContainerDied","Data":"301057e4df8cc1ed68c015ef5301474448424247e85c70923191446a1092d468"} Nov 24 14:55:59 crc kubenswrapper[5039]: I1124 14:55:59.081787 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dlptg" event={"ID":"592272ed-6a8c-42d9-8c87-b62ba335267c","Type":"ContainerStarted","Data":"4ca0029763ccf2c77cc6c355750aaf1eccb133193b6288d0e1fea114ba66b0c5"} Nov 24 14:56:00 crc kubenswrapper[5039]: I1124 14:56:00.098469 5039 generic.go:334] "Generic (PLEG): container finished" podID="592272ed-6a8c-42d9-8c87-b62ba335267c" containerID="4ca0029763ccf2c77cc6c355750aaf1eccb133193b6288d0e1fea114ba66b0c5" exitCode=0 Nov 24 14:56:00 crc kubenswrapper[5039]: 
I1124 14:56:00.098533 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dlptg" event={"ID":"592272ed-6a8c-42d9-8c87-b62ba335267c","Type":"ContainerDied","Data":"4ca0029763ccf2c77cc6c355750aaf1eccb133193b6288d0e1fea114ba66b0c5"} Nov 24 14:56:00 crc kubenswrapper[5039]: I1124 14:56:00.306830 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7" Nov 24 14:56:00 crc kubenswrapper[5039]: E1124 14:56:00.308191 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:56:01 crc kubenswrapper[5039]: I1124 14:56:01.113354 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dlptg" event={"ID":"592272ed-6a8c-42d9-8c87-b62ba335267c","Type":"ContainerStarted","Data":"ae8fba9ad166af626d604bad1d866f238de283972e5ad9e5a6f385e24e29b106"} Nov 24 14:56:01 crc kubenswrapper[5039]: I1124 14:56:01.135602 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-dlptg" podStartSLOduration=3.644961362 podStartE2EDuration="9.135580779s" podCreationTimestamp="2025-11-24 14:55:52 +0000 UTC" firstStartedPulling="2025-11-24 14:55:55.039944952 +0000 UTC m=+5867.479069452" lastFinishedPulling="2025-11-24 14:56:00.530564369 +0000 UTC m=+5872.969688869" observedRunningTime="2025-11-24 14:56:01.129636794 +0000 UTC m=+5873.568761304" watchObservedRunningTime="2025-11-24 14:56:01.135580779 +0000 UTC m=+5873.574705289" Nov 24 14:56:03 crc kubenswrapper[5039]: I1124 14:56:03.266490 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-dlptg" Nov 24 14:56:03 crc kubenswrapper[5039]: I1124 14:56:03.266967 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-dlptg" Nov 24 14:56:03 crc kubenswrapper[5039]: I1124 14:56:03.356773 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-dlptg" Nov 24 14:56:07 crc kubenswrapper[5039]: I1124 14:56:07.181991 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"e515a4f0-d838-4d61-906b-f26a0c07f8c8","Type":"ContainerStarted","Data":"43b1e426a69ff0a81ab7c12adb7bb64c76ca32ea690412b546ffaa39920dc01c"} Nov 24 14:56:07 crc kubenswrapper[5039]: I1124 14:56:07.210070 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=4.011907937 podStartE2EDuration="50.210052339s" podCreationTimestamp="2025-11-24 14:55:17 +0000 UTC" firstStartedPulling="2025-11-24 14:55:19.499018827 +0000 UTC m=+5831.938143337" lastFinishedPulling="2025-11-24 14:56:05.697163239 +0000 UTC m=+5878.136287739" observedRunningTime="2025-11-24 14:56:07.202051624 +0000 UTC m=+5879.641176124" watchObservedRunningTime="2025-11-24 14:56:07.210052339 +0000 UTC m=+5879.649176839" Nov 24 14:56:13 crc kubenswrapper[5039]: I1124 14:56:13.320583 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/community-operators-dlptg" Nov 24 14:56:13 crc kubenswrapper[5039]: I1124 14:56:13.412205 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dlptg"] Nov 24 14:56:13 crc kubenswrapper[5039]: I1124 14:56:13.476310 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-srnw9"] Nov 24 14:56:13 crc kubenswrapper[5039]: I1124 14:56:13.476589 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-srnw9" podUID="13a46435-cce5-4a37-8b41-6e183ecca8f6" containerName="registry-server" containerID="cri-o://fa58d45e9e4c8283ad96fbb7a721046753ccc000c83201e19cfd09f3511f912b" gracePeriod=2 Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.132659 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-srnw9" Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.284367 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13a46435-cce5-4a37-8b41-6e183ecca8f6-utilities\") pod \"13a46435-cce5-4a37-8b41-6e183ecca8f6\" (UID: \"13a46435-cce5-4a37-8b41-6e183ecca8f6\") " Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.284496 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5t6zk\" (UniqueName: \"kubernetes.io/projected/13a46435-cce5-4a37-8b41-6e183ecca8f6-kube-api-access-5t6zk\") pod \"13a46435-cce5-4a37-8b41-6e183ecca8f6\" (UID: \"13a46435-cce5-4a37-8b41-6e183ecca8f6\") " Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.284759 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13a46435-cce5-4a37-8b41-6e183ecca8f6-catalog-content\") pod \"13a46435-cce5-4a37-8b41-6e183ecca8f6\" (UID: \"13a46435-cce5-4a37-8b41-6e183ecca8f6\") " Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.286034 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13a46435-cce5-4a37-8b41-6e183ecca8f6-utilities" (OuterVolumeSpecName: "utilities") pod "13a46435-cce5-4a37-8b41-6e183ecca8f6" (UID: "13a46435-cce5-4a37-8b41-6e183ecca8f6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.286492 5039 generic.go:334] "Generic (PLEG): container finished" podID="13a46435-cce5-4a37-8b41-6e183ecca8f6" containerID="fa58d45e9e4c8283ad96fbb7a721046753ccc000c83201e19cfd09f3511f912b" exitCode=0 Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.286851 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-srnw9" Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.287586 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-srnw9" event={"ID":"13a46435-cce5-4a37-8b41-6e183ecca8f6","Type":"ContainerDied","Data":"fa58d45e9e4c8283ad96fbb7a721046753ccc000c83201e19cfd09f3511f912b"} Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.287631 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-srnw9" event={"ID":"13a46435-cce5-4a37-8b41-6e183ecca8f6","Type":"ContainerDied","Data":"704f97b7cae052b9c23eab20eb37efddbc302633cb5c0079bd0c6804fa0f5877"} Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.287654 5039 scope.go:117] "RemoveContainer" containerID="fa58d45e9e4c8283ad96fbb7a721046753ccc000c83201e19cfd09f3511f912b" Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.294803 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13a46435-cce5-4a37-8b41-6e183ecca8f6-kube-api-access-5t6zk" (OuterVolumeSpecName: "kube-api-access-5t6zk") pod "13a46435-cce5-4a37-8b41-6e183ecca8f6" (UID: "13a46435-cce5-4a37-8b41-6e183ecca8f6"). InnerVolumeSpecName "kube-api-access-5t6zk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.309752 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7" Nov 24 14:56:14 crc kubenswrapper[5039]: E1124 14:56:14.310204 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.387380 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13a46435-cce5-4a37-8b41-6e183ecca8f6-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.387422 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5t6zk\" (UniqueName: \"kubernetes.io/projected/13a46435-cce5-4a37-8b41-6e183ecca8f6-kube-api-access-5t6zk\") on node \"crc\" DevicePath \"\"" Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.401718 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13a46435-cce5-4a37-8b41-6e183ecca8f6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "13a46435-cce5-4a37-8b41-6e183ecca8f6" (UID: "13a46435-cce5-4a37-8b41-6e183ecca8f6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.425649 5039 scope.go:117] "RemoveContainer" containerID="c5e2103b8c4bcbb32445d38d866b14d4529ae57bc3df8aedbd3d942543611da6" Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.454953 5039 scope.go:117] "RemoveContainer" containerID="ff92ec781b1cf7ee5efdd80949584ccdb055b9b719c7d24762749a620da95dbb" Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.489717 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13a46435-cce5-4a37-8b41-6e183ecca8f6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.520952 5039 scope.go:117] "RemoveContainer" containerID="fa58d45e9e4c8283ad96fbb7a721046753ccc000c83201e19cfd09f3511f912b" Nov 24 14:56:14 crc kubenswrapper[5039]: E1124 14:56:14.521597 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa58d45e9e4c8283ad96fbb7a721046753ccc000c83201e19cfd09f3511f912b\": container with ID starting with fa58d45e9e4c8283ad96fbb7a721046753ccc000c83201e19cfd09f3511f912b not found: ID does not exist" containerID="fa58d45e9e4c8283ad96fbb7a721046753ccc000c83201e19cfd09f3511f912b" Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.521621 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa58d45e9e4c8283ad96fbb7a721046753ccc000c83201e19cfd09f3511f912b"} err="failed to get container status \"fa58d45e9e4c8283ad96fbb7a721046753ccc000c83201e19cfd09f3511f912b\": rpc error: code = NotFound desc = could not find container \"fa58d45e9e4c8283ad96fbb7a721046753ccc000c83201e19cfd09f3511f912b\": container with ID starting with fa58d45e9e4c8283ad96fbb7a721046753ccc000c83201e19cfd09f3511f912b not found: ID does not exist" Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.521645 5039 scope.go:117] "RemoveContainer" containerID="c5e2103b8c4bcbb32445d38d866b14d4529ae57bc3df8aedbd3d942543611da6" Nov 24 14:56:14 crc kubenswrapper[5039]: E1124 14:56:14.522046 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5e2103b8c4bcbb32445d38d866b14d4529ae57bc3df8aedbd3d942543611da6\": container with ID starting with c5e2103b8c4bcbb32445d38d866b14d4529ae57bc3df8aedbd3d942543611da6 not found: ID does not exist" containerID="c5e2103b8c4bcbb32445d38d866b14d4529ae57bc3df8aedbd3d942543611da6" Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.522080 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5e2103b8c4bcbb32445d38d866b14d4529ae57bc3df8aedbd3d942543611da6"} err="failed to get container status \"c5e2103b8c4bcbb32445d38d866b14d4529ae57bc3df8aedbd3d942543611da6\": rpc error: code = NotFound desc = could not find container \"c5e2103b8c4bcbb32445d38d866b14d4529ae57bc3df8aedbd3d942543611da6\": container with ID starting with c5e2103b8c4bcbb32445d38d866b14d4529ae57bc3df8aedbd3d942543611da6 not found: ID does not exist" Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.522101 5039 scope.go:117] "RemoveContainer" containerID="ff92ec781b1cf7ee5efdd80949584ccdb055b9b719c7d24762749a620da95dbb" Nov 24 14:56:14 crc kubenswrapper[5039]: E1124 14:56:14.522406 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"ff92ec781b1cf7ee5efdd80949584ccdb055b9b719c7d24762749a620da95dbb\": container with ID starting with ff92ec781b1cf7ee5efdd80949584ccdb055b9b719c7d24762749a620da95dbb not found: ID does not exist" containerID="ff92ec781b1cf7ee5efdd80949584ccdb055b9b719c7d24762749a620da95dbb" Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.522425 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff92ec781b1cf7ee5efdd80949584ccdb055b9b719c7d24762749a620da95dbb"} err="failed to get container status \"ff92ec781b1cf7ee5efdd80949584ccdb055b9b719c7d24762749a620da95dbb\": rpc error: code = NotFound desc = could not find container \"ff92ec781b1cf7ee5efdd80949584ccdb055b9b719c7d24762749a620da95dbb\": container with ID starting with ff92ec781b1cf7ee5efdd80949584ccdb055b9b719c7d24762749a620da95dbb not found: ID does not exist" Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.677017 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-srnw9"] Nov 24 14:56:14 crc kubenswrapper[5039]: I1124 14:56:14.689616 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-srnw9"] Nov 24 14:56:16 crc kubenswrapper[5039]: I1124 14:56:16.334383 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13a46435-cce5-4a37-8b41-6e183ecca8f6" path="/var/lib/kubelet/pods/13a46435-cce5-4a37-8b41-6e183ecca8f6/volumes" Nov 24 14:56:25 crc kubenswrapper[5039]: I1124 14:56:25.306443 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7" Nov 24 14:56:25 crc kubenswrapper[5039]: E1124 14:56:25.307284 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:56:40 crc kubenswrapper[5039]: I1124 14:56:40.307749 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7" Nov 24 14:56:40 crc kubenswrapper[5039]: E1124 14:56:40.308579 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:56:48 crc kubenswrapper[5039]: I1124 14:56:48.558484 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-5b66587b55-thzjl" podUID="bd1bf6a5-309b-4960-8f37-34b006db3599" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 24 14:56:54 crc kubenswrapper[5039]: I1124 14:56:54.306487 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7" Nov 24 14:56:54 crc kubenswrapper[5039]: E1124 14:56:54.307380 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:57:03 crc kubenswrapper[5039]: I1124 14:57:03.607814 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-d8mdk"] Nov 24 14:57:03 crc kubenswrapper[5039]: E1124 14:57:03.610450 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13a46435-cce5-4a37-8b41-6e183ecca8f6" containerName="registry-server" Nov 24 14:57:03 crc kubenswrapper[5039]: I1124 14:57:03.610480 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="13a46435-cce5-4a37-8b41-6e183ecca8f6" containerName="registry-server" Nov 24 14:57:03 crc kubenswrapper[5039]: E1124 14:57:03.610496 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13a46435-cce5-4a37-8b41-6e183ecca8f6" containerName="extract-utilities" Nov 24 14:57:03 crc kubenswrapper[5039]: I1124 14:57:03.610521 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="13a46435-cce5-4a37-8b41-6e183ecca8f6" containerName="extract-utilities" Nov 24 14:57:03 crc kubenswrapper[5039]: E1124 14:57:03.610564 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13a46435-cce5-4a37-8b41-6e183ecca8f6" containerName="extract-content" Nov 24 14:57:03 crc kubenswrapper[5039]: I1124 14:57:03.610571 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="13a46435-cce5-4a37-8b41-6e183ecca8f6" containerName="extract-content" Nov 24 14:57:03 crc kubenswrapper[5039]: I1124 14:57:03.610844 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="13a46435-cce5-4a37-8b41-6e183ecca8f6" containerName="registry-server" Nov 24 14:57:03 crc kubenswrapper[5039]: I1124 14:57:03.613870 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d8mdk" Nov 24 14:57:03 crc kubenswrapper[5039]: I1124 14:57:03.634996 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d8mdk"] Nov 24 14:57:03 crc kubenswrapper[5039]: I1124 14:57:03.698962 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45fq6\" (UniqueName: \"kubernetes.io/projected/48c404b7-421e-4fe9-a981-4246d09cf6d6-kube-api-access-45fq6\") pod \"redhat-marketplace-d8mdk\" (UID: \"48c404b7-421e-4fe9-a981-4246d09cf6d6\") " pod="openshift-marketplace/redhat-marketplace-d8mdk" Nov 24 14:57:03 crc kubenswrapper[5039]: I1124 14:57:03.699344 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48c404b7-421e-4fe9-a981-4246d09cf6d6-catalog-content\") pod \"redhat-marketplace-d8mdk\" (UID: \"48c404b7-421e-4fe9-a981-4246d09cf6d6\") " pod="openshift-marketplace/redhat-marketplace-d8mdk" Nov 24 14:57:03 crc kubenswrapper[5039]: I1124 14:57:03.699407 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48c404b7-421e-4fe9-a981-4246d09cf6d6-utilities\") pod \"redhat-marketplace-d8mdk\" (UID: \"48c404b7-421e-4fe9-a981-4246d09cf6d6\") " pod="openshift-marketplace/redhat-marketplace-d8mdk" Nov 24 14:57:03 crc kubenswrapper[5039]: I1124 14:57:03.801416 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45fq6\" (UniqueName: \"kubernetes.io/projected/48c404b7-421e-4fe9-a981-4246d09cf6d6-kube-api-access-45fq6\") pod \"redhat-marketplace-d8mdk\" (UID: \"48c404b7-421e-4fe9-a981-4246d09cf6d6\") " pod="openshift-marketplace/redhat-marketplace-d8mdk" Nov 24 14:57:03 crc kubenswrapper[5039]: I1124 14:57:03.801489 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48c404b7-421e-4fe9-a981-4246d09cf6d6-catalog-content\") pod \"redhat-marketplace-d8mdk\" (UID: \"48c404b7-421e-4fe9-a981-4246d09cf6d6\") " pod="openshift-marketplace/redhat-marketplace-d8mdk" Nov 24 14:57:03 crc kubenswrapper[5039]: I1124 14:57:03.801541 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48c404b7-421e-4fe9-a981-4246d09cf6d6-utilities\") pod \"redhat-marketplace-d8mdk\" (UID: \"48c404b7-421e-4fe9-a981-4246d09cf6d6\") " pod="openshift-marketplace/redhat-marketplace-d8mdk" Nov 24 14:57:03 crc kubenswrapper[5039]: I1124 14:57:03.805156 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48c404b7-421e-4fe9-a981-4246d09cf6d6-utilities\") pod \"redhat-marketplace-d8mdk\" (UID: \"48c404b7-421e-4fe9-a981-4246d09cf6d6\") " pod="openshift-marketplace/redhat-marketplace-d8mdk" Nov 24 14:57:03 crc kubenswrapper[5039]: I1124 14:57:03.805745 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48c404b7-421e-4fe9-a981-4246d09cf6d6-catalog-content\") pod \"redhat-marketplace-d8mdk\" (UID: \"48c404b7-421e-4fe9-a981-4246d09cf6d6\") " pod="openshift-marketplace/redhat-marketplace-d8mdk" Nov 24 14:57:03 crc kubenswrapper[5039]: I1124 14:57:03.828225 5039 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-45fq6\" (UniqueName: \"kubernetes.io/projected/48c404b7-421e-4fe9-a981-4246d09cf6d6-kube-api-access-45fq6\") pod \"redhat-marketplace-d8mdk\" (UID: \"48c404b7-421e-4fe9-a981-4246d09cf6d6\") " pod="openshift-marketplace/redhat-marketplace-d8mdk" Nov 24 14:57:03 crc kubenswrapper[5039]: I1124 14:57:03.944190 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d8mdk" Nov 24 14:57:04 crc kubenswrapper[5039]: I1124 14:57:04.639867 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d8mdk"] Nov 24 14:57:04 crc kubenswrapper[5039]: W1124 14:57:04.667595 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod48c404b7_421e_4fe9_a981_4246d09cf6d6.slice/crio-f79be13c2f23891fb75f865c62e1a97324013c65851262051d541bf89e97d283 WatchSource:0}: Error finding container f79be13c2f23891fb75f865c62e1a97324013c65851262051d541bf89e97d283: Status 404 returned error can't find the container with id f79be13c2f23891fb75f865c62e1a97324013c65851262051d541bf89e97d283 Nov 24 14:57:04 crc kubenswrapper[5039]: I1124 14:57:04.915945 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d8mdk" event={"ID":"48c404b7-421e-4fe9-a981-4246d09cf6d6","Type":"ContainerStarted","Data":"f79be13c2f23891fb75f865c62e1a97324013c65851262051d541bf89e97d283"} Nov 24 14:57:05 crc kubenswrapper[5039]: I1124 14:57:05.927258 5039 generic.go:334] "Generic (PLEG): container finished" podID="48c404b7-421e-4fe9-a981-4246d09cf6d6" containerID="b06d8f310ae0670d2873784e92a88fd92323cc109c83916c4e2dee59514d4e1f" exitCode=0 Nov 24 14:57:05 crc kubenswrapper[5039]: I1124 14:57:05.927359 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d8mdk" event={"ID":"48c404b7-421e-4fe9-a981-4246d09cf6d6","Type":"ContainerDied","Data":"b06d8f310ae0670d2873784e92a88fd92323cc109c83916c4e2dee59514d4e1f"} Nov 24 14:57:06 crc kubenswrapper[5039]: I1124 14:57:06.307896 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7" Nov 24 14:57:06 crc kubenswrapper[5039]: E1124 14:57:06.308444 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 14:57:06 crc kubenswrapper[5039]: I1124 14:57:06.940452 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d8mdk" event={"ID":"48c404b7-421e-4fe9-a981-4246d09cf6d6","Type":"ContainerStarted","Data":"fd9fd83a0cb37a422d30a0d7ef175eb0d358e6ed8b1493dced61788a9fb19e32"} Nov 24 14:57:07 crc kubenswrapper[5039]: I1124 14:57:07.956398 5039 generic.go:334] "Generic (PLEG): container finished" podID="48c404b7-421e-4fe9-a981-4246d09cf6d6" containerID="fd9fd83a0cb37a422d30a0d7ef175eb0d358e6ed8b1493dced61788a9fb19e32" exitCode=0 Nov 24 14:57:07 crc kubenswrapper[5039]: I1124 14:57:07.956521 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d8mdk" 
event={"ID":"48c404b7-421e-4fe9-a981-4246d09cf6d6","Type":"ContainerDied","Data":"fd9fd83a0cb37a422d30a0d7ef175eb0d358e6ed8b1493dced61788a9fb19e32"} Nov 24 14:57:08 crc kubenswrapper[5039]: I1124 14:57:08.976772 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d8mdk" event={"ID":"48c404b7-421e-4fe9-a981-4246d09cf6d6","Type":"ContainerStarted","Data":"a8298b93e7a0ec66b066b2177567b31f94c166aaed97572039aaf2fdab6ffbe4"} Nov 24 14:57:08 crc kubenswrapper[5039]: I1124 14:57:08.995225 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-d8mdk" podStartSLOduration=3.562416104 podStartE2EDuration="5.995207278s" podCreationTimestamp="2025-11-24 14:57:03 +0000 UTC" firstStartedPulling="2025-11-24 14:57:05.929188804 +0000 UTC m=+5938.368313304" lastFinishedPulling="2025-11-24 14:57:08.361979978 +0000 UTC m=+5940.801104478" observedRunningTime="2025-11-24 14:57:08.992885802 +0000 UTC m=+5941.432010322" watchObservedRunningTime="2025-11-24 14:57:08.995207278 +0000 UTC m=+5941.434331768" Nov 24 14:57:13 crc kubenswrapper[5039]: I1124 14:57:13.945240 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-d8mdk" Nov 24 14:57:13 crc kubenswrapper[5039]: I1124 14:57:13.947170 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-d8mdk" Nov 24 14:57:14 crc kubenswrapper[5039]: I1124 14:57:14.007194 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-d8mdk" Nov 24 14:57:14 crc kubenswrapper[5039]: I1124 14:57:14.088792 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-d8mdk" Nov 24 14:57:14 crc kubenswrapper[5039]: I1124 14:57:14.244291 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d8mdk"] Nov 24 14:57:16 crc kubenswrapper[5039]: I1124 14:57:16.053081 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-d8mdk" podUID="48c404b7-421e-4fe9-a981-4246d09cf6d6" containerName="registry-server" containerID="cri-o://a8298b93e7a0ec66b066b2177567b31f94c166aaed97572039aaf2fdab6ffbe4" gracePeriod=2 Nov 24 14:57:16 crc kubenswrapper[5039]: I1124 14:57:16.718792 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d8mdk" Nov 24 14:57:16 crc kubenswrapper[5039]: I1124 14:57:16.805211 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48c404b7-421e-4fe9-a981-4246d09cf6d6-utilities\") pod \"48c404b7-421e-4fe9-a981-4246d09cf6d6\" (UID: \"48c404b7-421e-4fe9-a981-4246d09cf6d6\") " Nov 24 14:57:16 crc kubenswrapper[5039]: I1124 14:57:16.805444 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45fq6\" (UniqueName: \"kubernetes.io/projected/48c404b7-421e-4fe9-a981-4246d09cf6d6-kube-api-access-45fq6\") pod \"48c404b7-421e-4fe9-a981-4246d09cf6d6\" (UID: \"48c404b7-421e-4fe9-a981-4246d09cf6d6\") " Nov 24 14:57:16 crc kubenswrapper[5039]: I1124 14:57:16.805472 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48c404b7-421e-4fe9-a981-4246d09cf6d6-catalog-content\") pod \"48c404b7-421e-4fe9-a981-4246d09cf6d6\" (UID: \"48c404b7-421e-4fe9-a981-4246d09cf6d6\") " Nov 24 14:57:16 crc kubenswrapper[5039]: I1124 14:57:16.806307 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/48c404b7-421e-4fe9-a981-4246d09cf6d6-utilities" (OuterVolumeSpecName: "utilities") pod "48c404b7-421e-4fe9-a981-4246d09cf6d6" (UID: "48c404b7-421e-4fe9-a981-4246d09cf6d6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:57:16 crc kubenswrapper[5039]: I1124 14:57:16.816825 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48c404b7-421e-4fe9-a981-4246d09cf6d6-kube-api-access-45fq6" (OuterVolumeSpecName: "kube-api-access-45fq6") pod "48c404b7-421e-4fe9-a981-4246d09cf6d6" (UID: "48c404b7-421e-4fe9-a981-4246d09cf6d6"). InnerVolumeSpecName "kube-api-access-45fq6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:57:16 crc kubenswrapper[5039]: I1124 14:57:16.823198 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/48c404b7-421e-4fe9-a981-4246d09cf6d6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "48c404b7-421e-4fe9-a981-4246d09cf6d6" (UID: "48c404b7-421e-4fe9-a981-4246d09cf6d6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:57:16 crc kubenswrapper[5039]: I1124 14:57:16.911420 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45fq6\" (UniqueName: \"kubernetes.io/projected/48c404b7-421e-4fe9-a981-4246d09cf6d6-kube-api-access-45fq6\") on node \"crc\" DevicePath \"\"" Nov 24 14:57:16 crc kubenswrapper[5039]: I1124 14:57:16.911457 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48c404b7-421e-4fe9-a981-4246d09cf6d6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 14:57:16 crc kubenswrapper[5039]: I1124 14:57:16.911470 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48c404b7-421e-4fe9-a981-4246d09cf6d6-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 14:57:17 crc kubenswrapper[5039]: I1124 14:57:17.067469 5039 generic.go:334] "Generic (PLEG): container finished" podID="48c404b7-421e-4fe9-a981-4246d09cf6d6" containerID="a8298b93e7a0ec66b066b2177567b31f94c166aaed97572039aaf2fdab6ffbe4" exitCode=0 Nov 24 14:57:17 crc kubenswrapper[5039]: I1124 14:57:17.067540 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d8mdk" event={"ID":"48c404b7-421e-4fe9-a981-4246d09cf6d6","Type":"ContainerDied","Data":"a8298b93e7a0ec66b066b2177567b31f94c166aaed97572039aaf2fdab6ffbe4"} Nov 24 14:57:17 crc kubenswrapper[5039]: I1124 14:57:17.067606 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d8mdk" event={"ID":"48c404b7-421e-4fe9-a981-4246d09cf6d6","Type":"ContainerDied","Data":"f79be13c2f23891fb75f865c62e1a97324013c65851262051d541bf89e97d283"} Nov 24 14:57:17 crc kubenswrapper[5039]: I1124 14:57:17.067617 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d8mdk" Nov 24 14:57:17 crc kubenswrapper[5039]: I1124 14:57:17.067630 5039 scope.go:117] "RemoveContainer" containerID="a8298b93e7a0ec66b066b2177567b31f94c166aaed97572039aaf2fdab6ffbe4" Nov 24 14:57:17 crc kubenswrapper[5039]: I1124 14:57:17.101600 5039 scope.go:117] "RemoveContainer" containerID="fd9fd83a0cb37a422d30a0d7ef175eb0d358e6ed8b1493dced61788a9fb19e32" Nov 24 14:57:17 crc kubenswrapper[5039]: I1124 14:57:17.116567 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d8mdk"] Nov 24 14:57:17 crc kubenswrapper[5039]: I1124 14:57:17.132226 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-d8mdk"] Nov 24 14:57:17 crc kubenswrapper[5039]: I1124 14:57:17.143811 5039 scope.go:117] "RemoveContainer" containerID="b06d8f310ae0670d2873784e92a88fd92323cc109c83916c4e2dee59514d4e1f" Nov 24 14:57:17 crc kubenswrapper[5039]: I1124 14:57:17.196344 5039 scope.go:117] "RemoveContainer" containerID="a8298b93e7a0ec66b066b2177567b31f94c166aaed97572039aaf2fdab6ffbe4" Nov 24 14:57:17 crc kubenswrapper[5039]: E1124 14:57:17.199597 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8298b93e7a0ec66b066b2177567b31f94c166aaed97572039aaf2fdab6ffbe4\": container with ID starting with a8298b93e7a0ec66b066b2177567b31f94c166aaed97572039aaf2fdab6ffbe4 not found: ID does not exist" containerID="a8298b93e7a0ec66b066b2177567b31f94c166aaed97572039aaf2fdab6ffbe4" Nov 24 14:57:17 crc kubenswrapper[5039]: I1124 14:57:17.199689 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8298b93e7a0ec66b066b2177567b31f94c166aaed97572039aaf2fdab6ffbe4"} err="failed to get container status \"a8298b93e7a0ec66b066b2177567b31f94c166aaed97572039aaf2fdab6ffbe4\": rpc error: code = NotFound desc = could not find container \"a8298b93e7a0ec66b066b2177567b31f94c166aaed97572039aaf2fdab6ffbe4\": container with ID starting with a8298b93e7a0ec66b066b2177567b31f94c166aaed97572039aaf2fdab6ffbe4 not found: ID does not exist" Nov 24 14:57:17 crc kubenswrapper[5039]: I1124 14:57:17.199750 5039 scope.go:117] "RemoveContainer" containerID="fd9fd83a0cb37a422d30a0d7ef175eb0d358e6ed8b1493dced61788a9fb19e32" Nov 24 14:57:17 crc kubenswrapper[5039]: E1124 14:57:17.200228 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd9fd83a0cb37a422d30a0d7ef175eb0d358e6ed8b1493dced61788a9fb19e32\": container with ID starting with fd9fd83a0cb37a422d30a0d7ef175eb0d358e6ed8b1493dced61788a9fb19e32 not found: ID does not exist" containerID="fd9fd83a0cb37a422d30a0d7ef175eb0d358e6ed8b1493dced61788a9fb19e32" Nov 24 14:57:17 crc kubenswrapper[5039]: I1124 14:57:17.200266 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd9fd83a0cb37a422d30a0d7ef175eb0d358e6ed8b1493dced61788a9fb19e32"} err="failed to get container status \"fd9fd83a0cb37a422d30a0d7ef175eb0d358e6ed8b1493dced61788a9fb19e32\": rpc error: code = NotFound desc = could not find container \"fd9fd83a0cb37a422d30a0d7ef175eb0d358e6ed8b1493dced61788a9fb19e32\": container with ID starting with fd9fd83a0cb37a422d30a0d7ef175eb0d358e6ed8b1493dced61788a9fb19e32 not found: ID does not exist" Nov 24 14:57:17 crc kubenswrapper[5039]: I1124 14:57:17.200297 5039 scope.go:117] "RemoveContainer" 
containerID="b06d8f310ae0670d2873784e92a88fd92323cc109c83916c4e2dee59514d4e1f" Nov 24 14:57:17 crc kubenswrapper[5039]: E1124 14:57:17.200492 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b06d8f310ae0670d2873784e92a88fd92323cc109c83916c4e2dee59514d4e1f\": container with ID starting with b06d8f310ae0670d2873784e92a88fd92323cc109c83916c4e2dee59514d4e1f not found: ID does not exist" containerID="b06d8f310ae0670d2873784e92a88fd92323cc109c83916c4e2dee59514d4e1f" Nov 24 14:57:17 crc kubenswrapper[5039]: I1124 14:57:17.200537 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b06d8f310ae0670d2873784e92a88fd92323cc109c83916c4e2dee59514d4e1f"} err="failed to get container status \"b06d8f310ae0670d2873784e92a88fd92323cc109c83916c4e2dee59514d4e1f\": rpc error: code = NotFound desc = could not find container \"b06d8f310ae0670d2873784e92a88fd92323cc109c83916c4e2dee59514d4e1f\": container with ID starting with b06d8f310ae0670d2873784e92a88fd92323cc109c83916c4e2dee59514d4e1f not found: ID does not exist" Nov 24 14:57:18 crc kubenswrapper[5039]: I1124 14:57:18.325094 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="48c404b7-421e-4fe9-a981-4246d09cf6d6" path="/var/lib/kubelet/pods/48c404b7-421e-4fe9-a981-4246d09cf6d6/volumes" Nov 24 14:57:21 crc kubenswrapper[5039]: I1124 14:57:21.307142 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7" Nov 24 14:57:22 crc kubenswrapper[5039]: I1124 14:57:22.125342 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"8e1b56e9a59952d23c6d0a5931bf75a97ec7b0035c65a6903040813adc8f08e3"} Nov 24 14:57:30 crc kubenswrapper[5039]: I1124 14:57:30.435239 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fxvc4"] Nov 24 14:57:30 crc kubenswrapper[5039]: E1124 14:57:30.436105 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48c404b7-421e-4fe9-a981-4246d09cf6d6" containerName="extract-utilities" Nov 24 14:57:30 crc kubenswrapper[5039]: I1124 14:57:30.436117 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="48c404b7-421e-4fe9-a981-4246d09cf6d6" containerName="extract-utilities" Nov 24 14:57:30 crc kubenswrapper[5039]: E1124 14:57:30.436146 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48c404b7-421e-4fe9-a981-4246d09cf6d6" containerName="registry-server" Nov 24 14:57:30 crc kubenswrapper[5039]: I1124 14:57:30.436152 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="48c404b7-421e-4fe9-a981-4246d09cf6d6" containerName="registry-server" Nov 24 14:57:30 crc kubenswrapper[5039]: E1124 14:57:30.436171 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48c404b7-421e-4fe9-a981-4246d09cf6d6" containerName="extract-content" Nov 24 14:57:30 crc kubenswrapper[5039]: I1124 14:57:30.436177 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="48c404b7-421e-4fe9-a981-4246d09cf6d6" containerName="extract-content" Nov 24 14:57:30 crc kubenswrapper[5039]: I1124 14:57:30.436395 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="48c404b7-421e-4fe9-a981-4246d09cf6d6" containerName="registry-server" Nov 24 14:57:30 crc kubenswrapper[5039]: I1124 14:57:30.437974 5039 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fxvc4" Nov 24 14:57:30 crc kubenswrapper[5039]: I1124 14:57:30.451678 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fxvc4"] Nov 24 14:57:30 crc kubenswrapper[5039]: I1124 14:57:30.456540 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f5f0082-dd31-43c7-9e0a-064a8b0f2efe-utilities\") pod \"certified-operators-fxvc4\" (UID: \"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe\") " pod="openshift-marketplace/certified-operators-fxvc4" Nov 24 14:57:30 crc kubenswrapper[5039]: I1124 14:57:30.456638 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f5f0082-dd31-43c7-9e0a-064a8b0f2efe-catalog-content\") pod \"certified-operators-fxvc4\" (UID: \"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe\") " pod="openshift-marketplace/certified-operators-fxvc4" Nov 24 14:57:30 crc kubenswrapper[5039]: I1124 14:57:30.456989 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8gjh\" (UniqueName: \"kubernetes.io/projected/4f5f0082-dd31-43c7-9e0a-064a8b0f2efe-kube-api-access-l8gjh\") pod \"certified-operators-fxvc4\" (UID: \"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe\") " pod="openshift-marketplace/certified-operators-fxvc4" Nov 24 14:57:30 crc kubenswrapper[5039]: I1124 14:57:30.559773 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8gjh\" (UniqueName: \"kubernetes.io/projected/4f5f0082-dd31-43c7-9e0a-064a8b0f2efe-kube-api-access-l8gjh\") pod \"certified-operators-fxvc4\" (UID: \"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe\") " pod="openshift-marketplace/certified-operators-fxvc4" Nov 24 14:57:30 crc kubenswrapper[5039]: I1124 14:57:30.559947 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f5f0082-dd31-43c7-9e0a-064a8b0f2efe-utilities\") pod \"certified-operators-fxvc4\" (UID: \"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe\") " pod="openshift-marketplace/certified-operators-fxvc4" Nov 24 14:57:30 crc kubenswrapper[5039]: I1124 14:57:30.560020 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f5f0082-dd31-43c7-9e0a-064a8b0f2efe-catalog-content\") pod \"certified-operators-fxvc4\" (UID: \"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe\") " pod="openshift-marketplace/certified-operators-fxvc4" Nov 24 14:57:30 crc kubenswrapper[5039]: I1124 14:57:30.560573 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f5f0082-dd31-43c7-9e0a-064a8b0f2efe-utilities\") pod \"certified-operators-fxvc4\" (UID: \"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe\") " pod="openshift-marketplace/certified-operators-fxvc4" Nov 24 14:57:30 crc kubenswrapper[5039]: I1124 14:57:30.560611 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f5f0082-dd31-43c7-9e0a-064a8b0f2efe-catalog-content\") pod \"certified-operators-fxvc4\" (UID: \"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe\") " pod="openshift-marketplace/certified-operators-fxvc4" Nov 24 14:57:30 crc kubenswrapper[5039]: I1124 14:57:30.579930 
5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8gjh\" (UniqueName: \"kubernetes.io/projected/4f5f0082-dd31-43c7-9e0a-064a8b0f2efe-kube-api-access-l8gjh\") pod \"certified-operators-fxvc4\" (UID: \"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe\") " pod="openshift-marketplace/certified-operators-fxvc4" Nov 24 14:57:30 crc kubenswrapper[5039]: I1124 14:57:30.761750 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fxvc4" Nov 24 14:57:31 crc kubenswrapper[5039]: I1124 14:57:31.402039 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fxvc4"] Nov 24 14:57:32 crc kubenswrapper[5039]: I1124 14:57:32.239397 5039 generic.go:334] "Generic (PLEG): container finished" podID="4f5f0082-dd31-43c7-9e0a-064a8b0f2efe" containerID="17fb1eb5985e45f883944d445957b75261611547c4360f14461b9f3cc277eb39" exitCode=0 Nov 24 14:57:32 crc kubenswrapper[5039]: I1124 14:57:32.239692 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fxvc4" event={"ID":"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe","Type":"ContainerDied","Data":"17fb1eb5985e45f883944d445957b75261611547c4360f14461b9f3cc277eb39"} Nov 24 14:57:32 crc kubenswrapper[5039]: I1124 14:57:32.239723 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fxvc4" event={"ID":"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe","Type":"ContainerStarted","Data":"84b14a17f4d1901c20db1a93cfa703c00ea4b8ca2e10d7f2d3844cf5d5d2965d"} Nov 24 14:57:34 crc kubenswrapper[5039]: I1124 14:57:34.262837 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fxvc4" event={"ID":"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe","Type":"ContainerStarted","Data":"5cc0477a8b19763c8f38e54c3534be173fe822952ffed305d13019955a43f6d0"} Nov 24 14:57:35 crc kubenswrapper[5039]: I1124 14:57:35.277429 5039 generic.go:334] "Generic (PLEG): container finished" podID="4f5f0082-dd31-43c7-9e0a-064a8b0f2efe" containerID="5cc0477a8b19763c8f38e54c3534be173fe822952ffed305d13019955a43f6d0" exitCode=0 Nov 24 14:57:35 crc kubenswrapper[5039]: I1124 14:57:35.277761 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fxvc4" event={"ID":"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe","Type":"ContainerDied","Data":"5cc0477a8b19763c8f38e54c3534be173fe822952ffed305d13019955a43f6d0"} Nov 24 14:57:36 crc kubenswrapper[5039]: I1124 14:57:36.290790 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fxvc4" event={"ID":"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe","Type":"ContainerStarted","Data":"d828fb3df9ae73dc69627ad19bbac779d86d47afbc5a761067daecd64c54887d"} Nov 24 14:57:36 crc kubenswrapper[5039]: I1124 14:57:36.326958 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-fxvc4" podStartSLOduration=2.850519882 podStartE2EDuration="6.326940171s" podCreationTimestamp="2025-11-24 14:57:30 +0000 UTC" firstStartedPulling="2025-11-24 14:57:32.242146561 +0000 UTC m=+5964.681271061" lastFinishedPulling="2025-11-24 14:57:35.71856686 +0000 UTC m=+5968.157691350" observedRunningTime="2025-11-24 14:57:36.326582602 +0000 UTC m=+5968.765707102" watchObservedRunningTime="2025-11-24 14:57:36.326940171 +0000 UTC m=+5968.766064671" Nov 24 14:57:40 crc kubenswrapper[5039]: I1124 14:57:40.762662 5039 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-fxvc4" Nov 24 14:57:40 crc kubenswrapper[5039]: I1124 14:57:40.764770 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-fxvc4" Nov 24 14:57:40 crc kubenswrapper[5039]: I1124 14:57:40.818077 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-fxvc4" Nov 24 14:57:41 crc kubenswrapper[5039]: I1124 14:57:41.397611 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-fxvc4" Nov 24 14:57:41 crc kubenswrapper[5039]: I1124 14:57:41.448842 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fxvc4"] Nov 24 14:57:43 crc kubenswrapper[5039]: I1124 14:57:43.375314 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-fxvc4" podUID="4f5f0082-dd31-43c7-9e0a-064a8b0f2efe" containerName="registry-server" containerID="cri-o://d828fb3df9ae73dc69627ad19bbac779d86d47afbc5a761067daecd64c54887d" gracePeriod=2 Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.182047 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fxvc4" Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.208424 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l8gjh\" (UniqueName: \"kubernetes.io/projected/4f5f0082-dd31-43c7-9e0a-064a8b0f2efe-kube-api-access-l8gjh\") pod \"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe\" (UID: \"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe\") " Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.208544 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f5f0082-dd31-43c7-9e0a-064a8b0f2efe-utilities\") pod \"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe\" (UID: \"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe\") " Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.208738 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f5f0082-dd31-43c7-9e0a-064a8b0f2efe-catalog-content\") pod \"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe\" (UID: \"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe\") " Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.210080 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f5f0082-dd31-43c7-9e0a-064a8b0f2efe-utilities" (OuterVolumeSpecName: "utilities") pod "4f5f0082-dd31-43c7-9e0a-064a8b0f2efe" (UID: "4f5f0082-dd31-43c7-9e0a-064a8b0f2efe"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.216789 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f5f0082-dd31-43c7-9e0a-064a8b0f2efe-kube-api-access-l8gjh" (OuterVolumeSpecName: "kube-api-access-l8gjh") pod "4f5f0082-dd31-43c7-9e0a-064a8b0f2efe" (UID: "4f5f0082-dd31-43c7-9e0a-064a8b0f2efe"). InnerVolumeSpecName "kube-api-access-l8gjh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.265055 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f5f0082-dd31-43c7-9e0a-064a8b0f2efe-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4f5f0082-dd31-43c7-9e0a-064a8b0f2efe" (UID: "4f5f0082-dd31-43c7-9e0a-064a8b0f2efe"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.312078 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l8gjh\" (UniqueName: \"kubernetes.io/projected/4f5f0082-dd31-43c7-9e0a-064a8b0f2efe-kube-api-access-l8gjh\") on node \"crc\" DevicePath \"\"" Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.312398 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f5f0082-dd31-43c7-9e0a-064a8b0f2efe-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.312408 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f5f0082-dd31-43c7-9e0a-064a8b0f2efe-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.390184 5039 generic.go:334] "Generic (PLEG): container finished" podID="4f5f0082-dd31-43c7-9e0a-064a8b0f2efe" containerID="d828fb3df9ae73dc69627ad19bbac779d86d47afbc5a761067daecd64c54887d" exitCode=0 Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.390233 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fxvc4" event={"ID":"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe","Type":"ContainerDied","Data":"d828fb3df9ae73dc69627ad19bbac779d86d47afbc5a761067daecd64c54887d"} Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.390238 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fxvc4" Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.390273 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fxvc4" event={"ID":"4f5f0082-dd31-43c7-9e0a-064a8b0f2efe","Type":"ContainerDied","Data":"84b14a17f4d1901c20db1a93cfa703c00ea4b8ca2e10d7f2d3844cf5d5d2965d"} Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.390295 5039 scope.go:117] "RemoveContainer" containerID="d828fb3df9ae73dc69627ad19bbac779d86d47afbc5a761067daecd64c54887d" Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.420869 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fxvc4"] Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.429452 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-fxvc4"] Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.432233 5039 scope.go:117] "RemoveContainer" containerID="5cc0477a8b19763c8f38e54c3534be173fe822952ffed305d13019955a43f6d0" Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.469420 5039 scope.go:117] "RemoveContainer" containerID="17fb1eb5985e45f883944d445957b75261611547c4360f14461b9f3cc277eb39" Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.512431 5039 scope.go:117] "RemoveContainer" containerID="d828fb3df9ae73dc69627ad19bbac779d86d47afbc5a761067daecd64c54887d" Nov 24 14:57:44 crc kubenswrapper[5039]: E1124 14:57:44.512937 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d828fb3df9ae73dc69627ad19bbac779d86d47afbc5a761067daecd64c54887d\": container with ID starting with d828fb3df9ae73dc69627ad19bbac779d86d47afbc5a761067daecd64c54887d not found: ID does not exist" containerID="d828fb3df9ae73dc69627ad19bbac779d86d47afbc5a761067daecd64c54887d" Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.512991 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d828fb3df9ae73dc69627ad19bbac779d86d47afbc5a761067daecd64c54887d"} err="failed to get container status \"d828fb3df9ae73dc69627ad19bbac779d86d47afbc5a761067daecd64c54887d\": rpc error: code = NotFound desc = could not find container \"d828fb3df9ae73dc69627ad19bbac779d86d47afbc5a761067daecd64c54887d\": container with ID starting with d828fb3df9ae73dc69627ad19bbac779d86d47afbc5a761067daecd64c54887d not found: ID does not exist" Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.513017 5039 scope.go:117] "RemoveContainer" containerID="5cc0477a8b19763c8f38e54c3534be173fe822952ffed305d13019955a43f6d0" Nov 24 14:57:44 crc kubenswrapper[5039]: E1124 14:57:44.513579 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cc0477a8b19763c8f38e54c3534be173fe822952ffed305d13019955a43f6d0\": container with ID starting with 5cc0477a8b19763c8f38e54c3534be173fe822952ffed305d13019955a43f6d0 not found: ID does not exist" containerID="5cc0477a8b19763c8f38e54c3534be173fe822952ffed305d13019955a43f6d0" Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.513641 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cc0477a8b19763c8f38e54c3534be173fe822952ffed305d13019955a43f6d0"} err="failed to get container status \"5cc0477a8b19763c8f38e54c3534be173fe822952ffed305d13019955a43f6d0\": rpc error: code = NotFound desc = could not find 
container \"5cc0477a8b19763c8f38e54c3534be173fe822952ffed305d13019955a43f6d0\": container with ID starting with 5cc0477a8b19763c8f38e54c3534be173fe822952ffed305d13019955a43f6d0 not found: ID does not exist" Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.513667 5039 scope.go:117] "RemoveContainer" containerID="17fb1eb5985e45f883944d445957b75261611547c4360f14461b9f3cc277eb39" Nov 24 14:57:44 crc kubenswrapper[5039]: E1124 14:57:44.513976 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17fb1eb5985e45f883944d445957b75261611547c4360f14461b9f3cc277eb39\": container with ID starting with 17fb1eb5985e45f883944d445957b75261611547c4360f14461b9f3cc277eb39 not found: ID does not exist" containerID="17fb1eb5985e45f883944d445957b75261611547c4360f14461b9f3cc277eb39" Nov 24 14:57:44 crc kubenswrapper[5039]: I1124 14:57:44.514020 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17fb1eb5985e45f883944d445957b75261611547c4360f14461b9f3cc277eb39"} err="failed to get container status \"17fb1eb5985e45f883944d445957b75261611547c4360f14461b9f3cc277eb39\": rpc error: code = NotFound desc = could not find container \"17fb1eb5985e45f883944d445957b75261611547c4360f14461b9f3cc277eb39\": container with ID starting with 17fb1eb5985e45f883944d445957b75261611547c4360f14461b9f3cc277eb39 not found: ID does not exist" Nov 24 14:57:46 crc kubenswrapper[5039]: I1124 14:57:46.329231 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f5f0082-dd31-43c7-9e0a-064a8b0f2efe" path="/var/lib/kubelet/pods/4f5f0082-dd31-43c7-9e0a-064a8b0f2efe/volumes" Nov 24 14:59:50 crc kubenswrapper[5039]: I1124 14:59:50.101902 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 14:59:50 crc kubenswrapper[5039]: I1124 14:59:50.102925 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 15:00:00 crc kubenswrapper[5039]: I1124 15:00:00.182023 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399940-77d45"] Nov 24 15:00:00 crc kubenswrapper[5039]: E1124 15:00:00.183164 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f5f0082-dd31-43c7-9e0a-064a8b0f2efe" containerName="extract-utilities" Nov 24 15:00:00 crc kubenswrapper[5039]: I1124 15:00:00.183179 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f5f0082-dd31-43c7-9e0a-064a8b0f2efe" containerName="extract-utilities" Nov 24 15:00:00 crc kubenswrapper[5039]: E1124 15:00:00.183192 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f5f0082-dd31-43c7-9e0a-064a8b0f2efe" containerName="extract-content" Nov 24 15:00:00 crc kubenswrapper[5039]: I1124 15:00:00.183198 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f5f0082-dd31-43c7-9e0a-064a8b0f2efe" containerName="extract-content" Nov 24 15:00:00 crc kubenswrapper[5039]: E1124 15:00:00.183221 5039 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="4f5f0082-dd31-43c7-9e0a-064a8b0f2efe" containerName="registry-server" Nov 24 15:00:00 crc kubenswrapper[5039]: I1124 15:00:00.183227 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f5f0082-dd31-43c7-9e0a-064a8b0f2efe" containerName="registry-server" Nov 24 15:00:00 crc kubenswrapper[5039]: I1124 15:00:00.183436 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f5f0082-dd31-43c7-9e0a-064a8b0f2efe" containerName="registry-server" Nov 24 15:00:00 crc kubenswrapper[5039]: I1124 15:00:00.184263 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399940-77d45" Nov 24 15:00:00 crc kubenswrapper[5039]: I1124 15:00:00.186818 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 24 15:00:00 crc kubenswrapper[5039]: I1124 15:00:00.186824 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 24 15:00:00 crc kubenswrapper[5039]: I1124 15:00:00.208639 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399940-77d45"] Nov 24 15:00:00 crc kubenswrapper[5039]: I1124 15:00:00.291812 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5z6d5\" (UniqueName: \"kubernetes.io/projected/967628bb-eec1-4ada-b4b3-3488b8adc5f6-kube-api-access-5z6d5\") pod \"collect-profiles-29399940-77d45\" (UID: \"967628bb-eec1-4ada-b4b3-3488b8adc5f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399940-77d45" Nov 24 15:00:00 crc kubenswrapper[5039]: I1124 15:00:00.291916 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/967628bb-eec1-4ada-b4b3-3488b8adc5f6-secret-volume\") pod \"collect-profiles-29399940-77d45\" (UID: \"967628bb-eec1-4ada-b4b3-3488b8adc5f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399940-77d45" Nov 24 15:00:00 crc kubenswrapper[5039]: I1124 15:00:00.291962 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/967628bb-eec1-4ada-b4b3-3488b8adc5f6-config-volume\") pod \"collect-profiles-29399940-77d45\" (UID: \"967628bb-eec1-4ada-b4b3-3488b8adc5f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399940-77d45" Nov 24 15:00:00 crc kubenswrapper[5039]: I1124 15:00:00.394568 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5z6d5\" (UniqueName: \"kubernetes.io/projected/967628bb-eec1-4ada-b4b3-3488b8adc5f6-kube-api-access-5z6d5\") pod \"collect-profiles-29399940-77d45\" (UID: \"967628bb-eec1-4ada-b4b3-3488b8adc5f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399940-77d45" Nov 24 15:00:00 crc kubenswrapper[5039]: I1124 15:00:00.394860 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/967628bb-eec1-4ada-b4b3-3488b8adc5f6-secret-volume\") pod \"collect-profiles-29399940-77d45\" (UID: \"967628bb-eec1-4ada-b4b3-3488b8adc5f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399940-77d45" Nov 24 15:00:00 crc kubenswrapper[5039]: I1124 15:00:00.395006 5039 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/967628bb-eec1-4ada-b4b3-3488b8adc5f6-config-volume\") pod \"collect-profiles-29399940-77d45\" (UID: \"967628bb-eec1-4ada-b4b3-3488b8adc5f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399940-77d45" Nov 24 15:00:00 crc kubenswrapper[5039]: I1124 15:00:00.396195 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/967628bb-eec1-4ada-b4b3-3488b8adc5f6-config-volume\") pod \"collect-profiles-29399940-77d45\" (UID: \"967628bb-eec1-4ada-b4b3-3488b8adc5f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399940-77d45" Nov 24 15:00:00 crc kubenswrapper[5039]: I1124 15:00:00.402041 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/967628bb-eec1-4ada-b4b3-3488b8adc5f6-secret-volume\") pod \"collect-profiles-29399940-77d45\" (UID: \"967628bb-eec1-4ada-b4b3-3488b8adc5f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399940-77d45" Nov 24 15:00:00 crc kubenswrapper[5039]: I1124 15:00:00.413768 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5z6d5\" (UniqueName: \"kubernetes.io/projected/967628bb-eec1-4ada-b4b3-3488b8adc5f6-kube-api-access-5z6d5\") pod \"collect-profiles-29399940-77d45\" (UID: \"967628bb-eec1-4ada-b4b3-3488b8adc5f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399940-77d45" Nov 24 15:00:00 crc kubenswrapper[5039]: I1124 15:00:00.512470 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399940-77d45" Nov 24 15:00:01 crc kubenswrapper[5039]: I1124 15:00:01.117253 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399940-77d45"] Nov 24 15:00:01 crc kubenswrapper[5039]: I1124 15:00:01.877423 5039 generic.go:334] "Generic (PLEG): container finished" podID="967628bb-eec1-4ada-b4b3-3488b8adc5f6" containerID="1125d3b0f23911bbc1e74290c79dfb0d2002235dfda7b1da6c78d48d23f6ea2e" exitCode=0 Nov 24 15:00:01 crc kubenswrapper[5039]: I1124 15:00:01.877520 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399940-77d45" event={"ID":"967628bb-eec1-4ada-b4b3-3488b8adc5f6","Type":"ContainerDied","Data":"1125d3b0f23911bbc1e74290c79dfb0d2002235dfda7b1da6c78d48d23f6ea2e"} Nov 24 15:00:01 crc kubenswrapper[5039]: I1124 15:00:01.877801 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399940-77d45" event={"ID":"967628bb-eec1-4ada-b4b3-3488b8adc5f6","Type":"ContainerStarted","Data":"50c5adaa02d1602540aa19d771560b18da82fe1aacc60f8794fca48b08876caa"} Nov 24 15:00:03 crc kubenswrapper[5039]: I1124 15:00:03.402998 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399940-77d45" Nov 24 15:00:03 crc kubenswrapper[5039]: I1124 15:00:03.470540 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/967628bb-eec1-4ada-b4b3-3488b8adc5f6-config-volume\") pod \"967628bb-eec1-4ada-b4b3-3488b8adc5f6\" (UID: \"967628bb-eec1-4ada-b4b3-3488b8adc5f6\") " Nov 24 15:00:03 crc kubenswrapper[5039]: I1124 15:00:03.470786 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/967628bb-eec1-4ada-b4b3-3488b8adc5f6-secret-volume\") pod \"967628bb-eec1-4ada-b4b3-3488b8adc5f6\" (UID: \"967628bb-eec1-4ada-b4b3-3488b8adc5f6\") " Nov 24 15:00:03 crc kubenswrapper[5039]: I1124 15:00:03.470892 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5z6d5\" (UniqueName: \"kubernetes.io/projected/967628bb-eec1-4ada-b4b3-3488b8adc5f6-kube-api-access-5z6d5\") pod \"967628bb-eec1-4ada-b4b3-3488b8adc5f6\" (UID: \"967628bb-eec1-4ada-b4b3-3488b8adc5f6\") " Nov 24 15:00:03 crc kubenswrapper[5039]: I1124 15:00:03.471100 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/967628bb-eec1-4ada-b4b3-3488b8adc5f6-config-volume" (OuterVolumeSpecName: "config-volume") pod "967628bb-eec1-4ada-b4b3-3488b8adc5f6" (UID: "967628bb-eec1-4ada-b4b3-3488b8adc5f6"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 15:00:03 crc kubenswrapper[5039]: I1124 15:00:03.472041 5039 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/967628bb-eec1-4ada-b4b3-3488b8adc5f6-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 15:00:03 crc kubenswrapper[5039]: I1124 15:00:03.478552 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/967628bb-eec1-4ada-b4b3-3488b8adc5f6-kube-api-access-5z6d5" (OuterVolumeSpecName: "kube-api-access-5z6d5") pod "967628bb-eec1-4ada-b4b3-3488b8adc5f6" (UID: "967628bb-eec1-4ada-b4b3-3488b8adc5f6"). InnerVolumeSpecName "kube-api-access-5z6d5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 15:00:03 crc kubenswrapper[5039]: I1124 15:00:03.479337 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/967628bb-eec1-4ada-b4b3-3488b8adc5f6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "967628bb-eec1-4ada-b4b3-3488b8adc5f6" (UID: "967628bb-eec1-4ada-b4b3-3488b8adc5f6"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 15:00:03 crc kubenswrapper[5039]: I1124 15:00:03.574819 5039 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/967628bb-eec1-4ada-b4b3-3488b8adc5f6-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 24 15:00:03 crc kubenswrapper[5039]: I1124 15:00:03.574854 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5z6d5\" (UniqueName: \"kubernetes.io/projected/967628bb-eec1-4ada-b4b3-3488b8adc5f6-kube-api-access-5z6d5\") on node \"crc\" DevicePath \"\"" Nov 24 15:00:03 crc kubenswrapper[5039]: I1124 15:00:03.901380 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399940-77d45" event={"ID":"967628bb-eec1-4ada-b4b3-3488b8adc5f6","Type":"ContainerDied","Data":"50c5adaa02d1602540aa19d771560b18da82fe1aacc60f8794fca48b08876caa"} Nov 24 15:00:03 crc kubenswrapper[5039]: I1124 15:00:03.901747 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="50c5adaa02d1602540aa19d771560b18da82fe1aacc60f8794fca48b08876caa" Nov 24 15:00:03 crc kubenswrapper[5039]: I1124 15:00:03.901470 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399940-77d45" Nov 24 15:00:04 crc kubenswrapper[5039]: I1124 15:00:04.504839 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm"] Nov 24 15:00:04 crc kubenswrapper[5039]: I1124 15:00:04.518102 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399895-l4dvm"] Nov 24 15:00:06 crc kubenswrapper[5039]: I1124 15:00:06.325863 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8dd6f7c3-881e-4acb-9563-af143fdea78e" path="/var/lib/kubelet/pods/8dd6f7c3-881e-4acb-9563-af143fdea78e/volumes" Nov 24 15:00:20 crc kubenswrapper[5039]: I1124 15:00:20.101123 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 15:00:20 crc kubenswrapper[5039]: I1124 15:00:20.101887 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 15:00:50 crc kubenswrapper[5039]: I1124 15:00:50.101580 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 15:00:50 crc kubenswrapper[5039]: I1124 15:00:50.102362 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 15:00:50 crc kubenswrapper[5039]: 
I1124 15:00:50.102426 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 15:00:50 crc kubenswrapper[5039]: I1124 15:00:50.104150 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8e1b56e9a59952d23c6d0a5931bf75a97ec7b0035c65a6903040813adc8f08e3"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 15:00:50 crc kubenswrapper[5039]: I1124 15:00:50.104245 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://8e1b56e9a59952d23c6d0a5931bf75a97ec7b0035c65a6903040813adc8f08e3" gracePeriod=600 Nov 24 15:00:50 crc kubenswrapper[5039]: I1124 15:00:50.411227 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="8e1b56e9a59952d23c6d0a5931bf75a97ec7b0035c65a6903040813adc8f08e3" exitCode=0 Nov 24 15:00:50 crc kubenswrapper[5039]: I1124 15:00:50.411310 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"8e1b56e9a59952d23c6d0a5931bf75a97ec7b0035c65a6903040813adc8f08e3"} Nov 24 15:00:50 crc kubenswrapper[5039]: I1124 15:00:50.411833 5039 scope.go:117] "RemoveContainer" containerID="e0664edca0f97d9f9334e7e9d126069a19d78c5d224b18c7d1f051a7478edaf7" Nov 24 15:00:51 crc kubenswrapper[5039]: I1124 15:00:51.442496 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b"} Nov 24 15:00:56 crc kubenswrapper[5039]: I1124 15:00:56.547629 5039 scope.go:117] "RemoveContainer" containerID="f67ce7c9795a4ea3eef0e343c0e79040e0de30dde476d91dcfdebe81e36cb04b" Nov 24 15:01:00 crc kubenswrapper[5039]: I1124 15:01:00.156620 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29399941-srhkf"] Nov 24 15:01:00 crc kubenswrapper[5039]: E1124 15:01:00.158728 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="967628bb-eec1-4ada-b4b3-3488b8adc5f6" containerName="collect-profiles" Nov 24 15:01:00 crc kubenswrapper[5039]: I1124 15:01:00.158819 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="967628bb-eec1-4ada-b4b3-3488b8adc5f6" containerName="collect-profiles" Nov 24 15:01:00 crc kubenswrapper[5039]: I1124 15:01:00.159143 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="967628bb-eec1-4ada-b4b3-3488b8adc5f6" containerName="collect-profiles" Nov 24 15:01:00 crc kubenswrapper[5039]: I1124 15:01:00.160072 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29399941-srhkf" Nov 24 15:01:00 crc kubenswrapper[5039]: I1124 15:01:00.170433 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29399941-srhkf"] Nov 24 15:01:00 crc kubenswrapper[5039]: I1124 15:01:00.199206 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e88464ce-c201-4ce0-831a-bad31b599341-combined-ca-bundle\") pod \"keystone-cron-29399941-srhkf\" (UID: \"e88464ce-c201-4ce0-831a-bad31b599341\") " pod="openstack/keystone-cron-29399941-srhkf" Nov 24 15:01:00 crc kubenswrapper[5039]: I1124 15:01:00.199399 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e88464ce-c201-4ce0-831a-bad31b599341-config-data\") pod \"keystone-cron-29399941-srhkf\" (UID: \"e88464ce-c201-4ce0-831a-bad31b599341\") " pod="openstack/keystone-cron-29399941-srhkf" Nov 24 15:01:00 crc kubenswrapper[5039]: I1124 15:01:00.199453 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e88464ce-c201-4ce0-831a-bad31b599341-fernet-keys\") pod \"keystone-cron-29399941-srhkf\" (UID: \"e88464ce-c201-4ce0-831a-bad31b599341\") " pod="openstack/keystone-cron-29399941-srhkf" Nov 24 15:01:00 crc kubenswrapper[5039]: I1124 15:01:00.199602 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nqch\" (UniqueName: \"kubernetes.io/projected/e88464ce-c201-4ce0-831a-bad31b599341-kube-api-access-5nqch\") pod \"keystone-cron-29399941-srhkf\" (UID: \"e88464ce-c201-4ce0-831a-bad31b599341\") " pod="openstack/keystone-cron-29399941-srhkf" Nov 24 15:01:00 crc kubenswrapper[5039]: I1124 15:01:00.301346 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e88464ce-c201-4ce0-831a-bad31b599341-combined-ca-bundle\") pod \"keystone-cron-29399941-srhkf\" (UID: \"e88464ce-c201-4ce0-831a-bad31b599341\") " pod="openstack/keystone-cron-29399941-srhkf" Nov 24 15:01:00 crc kubenswrapper[5039]: I1124 15:01:00.301678 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e88464ce-c201-4ce0-831a-bad31b599341-config-data\") pod \"keystone-cron-29399941-srhkf\" (UID: \"e88464ce-c201-4ce0-831a-bad31b599341\") " pod="openstack/keystone-cron-29399941-srhkf" Nov 24 15:01:00 crc kubenswrapper[5039]: I1124 15:01:00.301707 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e88464ce-c201-4ce0-831a-bad31b599341-fernet-keys\") pod \"keystone-cron-29399941-srhkf\" (UID: \"e88464ce-c201-4ce0-831a-bad31b599341\") " pod="openstack/keystone-cron-29399941-srhkf" Nov 24 15:01:00 crc kubenswrapper[5039]: I1124 15:01:00.301769 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nqch\" (UniqueName: \"kubernetes.io/projected/e88464ce-c201-4ce0-831a-bad31b599341-kube-api-access-5nqch\") pod \"keystone-cron-29399941-srhkf\" (UID: \"e88464ce-c201-4ce0-831a-bad31b599341\") " pod="openstack/keystone-cron-29399941-srhkf" Nov 24 15:01:00 crc kubenswrapper[5039]: I1124 15:01:00.308583 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e88464ce-c201-4ce0-831a-bad31b599341-fernet-keys\") pod \"keystone-cron-29399941-srhkf\" (UID: \"e88464ce-c201-4ce0-831a-bad31b599341\") " pod="openstack/keystone-cron-29399941-srhkf" Nov 24 15:01:00 crc kubenswrapper[5039]: I1124 15:01:00.310420 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e88464ce-c201-4ce0-831a-bad31b599341-config-data\") pod \"keystone-cron-29399941-srhkf\" (UID: \"e88464ce-c201-4ce0-831a-bad31b599341\") " pod="openstack/keystone-cron-29399941-srhkf" Nov 24 15:01:00 crc kubenswrapper[5039]: I1124 15:01:00.314720 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e88464ce-c201-4ce0-831a-bad31b599341-combined-ca-bundle\") pod \"keystone-cron-29399941-srhkf\" (UID: \"e88464ce-c201-4ce0-831a-bad31b599341\") " pod="openstack/keystone-cron-29399941-srhkf" Nov 24 15:01:00 crc kubenswrapper[5039]: I1124 15:01:00.324845 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nqch\" (UniqueName: \"kubernetes.io/projected/e88464ce-c201-4ce0-831a-bad31b599341-kube-api-access-5nqch\") pod \"keystone-cron-29399941-srhkf\" (UID: \"e88464ce-c201-4ce0-831a-bad31b599341\") " pod="openstack/keystone-cron-29399941-srhkf" Nov 24 15:01:00 crc kubenswrapper[5039]: I1124 15:01:00.487271 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29399941-srhkf" Nov 24 15:01:00 crc kubenswrapper[5039]: I1124 15:01:00.980657 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29399941-srhkf"] Nov 24 15:01:01 crc kubenswrapper[5039]: I1124 15:01:01.550648 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29399941-srhkf" event={"ID":"e88464ce-c201-4ce0-831a-bad31b599341","Type":"ContainerStarted","Data":"424fc263fa20ca3478bddc787058f460a5ab2c8c88398c35f6310f213170189f"} Nov 24 15:01:01 crc kubenswrapper[5039]: I1124 15:01:01.551172 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29399941-srhkf" event={"ID":"e88464ce-c201-4ce0-831a-bad31b599341","Type":"ContainerStarted","Data":"28ed2c4c19adadc5e436ffa624bfc71d1bf8abb97e163375716bbaae94b4df8c"} Nov 24 15:01:01 crc kubenswrapper[5039]: I1124 15:01:01.576274 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29399941-srhkf" podStartSLOduration=1.576251144 podStartE2EDuration="1.576251144s" podCreationTimestamp="2025-11-24 15:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 15:01:01.568183417 +0000 UTC m=+6174.007307917" watchObservedRunningTime="2025-11-24 15:01:01.576251144 +0000 UTC m=+6174.015375644" Nov 24 15:01:04 crc kubenswrapper[5039]: I1124 15:01:04.595659 5039 generic.go:334] "Generic (PLEG): container finished" podID="e88464ce-c201-4ce0-831a-bad31b599341" containerID="424fc263fa20ca3478bddc787058f460a5ab2c8c88398c35f6310f213170189f" exitCode=0 Nov 24 15:01:04 crc kubenswrapper[5039]: I1124 15:01:04.595722 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29399941-srhkf" event={"ID":"e88464ce-c201-4ce0-831a-bad31b599341","Type":"ContainerDied","Data":"424fc263fa20ca3478bddc787058f460a5ab2c8c88398c35f6310f213170189f"} Nov 24 15:01:06 crc kubenswrapper[5039]: 
I1124 15:01:06.565372 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29399941-srhkf" Nov 24 15:01:06 crc kubenswrapper[5039]: I1124 15:01:06.624770 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29399941-srhkf" event={"ID":"e88464ce-c201-4ce0-831a-bad31b599341","Type":"ContainerDied","Data":"28ed2c4c19adadc5e436ffa624bfc71d1bf8abb97e163375716bbaae94b4df8c"} Nov 24 15:01:06 crc kubenswrapper[5039]: I1124 15:01:06.624810 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="28ed2c4c19adadc5e436ffa624bfc71d1bf8abb97e163375716bbaae94b4df8c" Nov 24 15:01:06 crc kubenswrapper[5039]: I1124 15:01:06.624817 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29399941-srhkf" Nov 24 15:01:06 crc kubenswrapper[5039]: I1124 15:01:06.709334 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5nqch\" (UniqueName: \"kubernetes.io/projected/e88464ce-c201-4ce0-831a-bad31b599341-kube-api-access-5nqch\") pod \"e88464ce-c201-4ce0-831a-bad31b599341\" (UID: \"e88464ce-c201-4ce0-831a-bad31b599341\") " Nov 24 15:01:06 crc kubenswrapper[5039]: I1124 15:01:06.709626 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e88464ce-c201-4ce0-831a-bad31b599341-config-data\") pod \"e88464ce-c201-4ce0-831a-bad31b599341\" (UID: \"e88464ce-c201-4ce0-831a-bad31b599341\") " Nov 24 15:01:06 crc kubenswrapper[5039]: I1124 15:01:06.709657 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e88464ce-c201-4ce0-831a-bad31b599341-fernet-keys\") pod \"e88464ce-c201-4ce0-831a-bad31b599341\" (UID: \"e88464ce-c201-4ce0-831a-bad31b599341\") " Nov 24 15:01:06 crc kubenswrapper[5039]: I1124 15:01:06.709699 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e88464ce-c201-4ce0-831a-bad31b599341-combined-ca-bundle\") pod \"e88464ce-c201-4ce0-831a-bad31b599341\" (UID: \"e88464ce-c201-4ce0-831a-bad31b599341\") " Nov 24 15:01:06 crc kubenswrapper[5039]: I1124 15:01:06.718666 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e88464ce-c201-4ce0-831a-bad31b599341-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "e88464ce-c201-4ce0-831a-bad31b599341" (UID: "e88464ce-c201-4ce0-831a-bad31b599341"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 15:01:06 crc kubenswrapper[5039]: I1124 15:01:06.718692 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e88464ce-c201-4ce0-831a-bad31b599341-kube-api-access-5nqch" (OuterVolumeSpecName: "kube-api-access-5nqch") pod "e88464ce-c201-4ce0-831a-bad31b599341" (UID: "e88464ce-c201-4ce0-831a-bad31b599341"). InnerVolumeSpecName "kube-api-access-5nqch". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 15:01:06 crc kubenswrapper[5039]: I1124 15:01:06.748672 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e88464ce-c201-4ce0-831a-bad31b599341-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e88464ce-c201-4ce0-831a-bad31b599341" (UID: "e88464ce-c201-4ce0-831a-bad31b599341"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 15:01:06 crc kubenswrapper[5039]: I1124 15:01:06.800389 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e88464ce-c201-4ce0-831a-bad31b599341-config-data" (OuterVolumeSpecName: "config-data") pod "e88464ce-c201-4ce0-831a-bad31b599341" (UID: "e88464ce-c201-4ce0-831a-bad31b599341"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 15:01:06 crc kubenswrapper[5039]: I1124 15:01:06.824754 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5nqch\" (UniqueName: \"kubernetes.io/projected/e88464ce-c201-4ce0-831a-bad31b599341-kube-api-access-5nqch\") on node \"crc\" DevicePath \"\"" Nov 24 15:01:06 crc kubenswrapper[5039]: I1124 15:01:06.824784 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e88464ce-c201-4ce0-831a-bad31b599341-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 15:01:06 crc kubenswrapper[5039]: I1124 15:01:06.824794 5039 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e88464ce-c201-4ce0-831a-bad31b599341-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 24 15:01:06 crc kubenswrapper[5039]: I1124 15:01:06.824803 5039 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e88464ce-c201-4ce0-831a-bad31b599341-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 15:02:50 crc kubenswrapper[5039]: I1124 15:02:50.101303 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 15:02:50 crc kubenswrapper[5039]: I1124 15:02:50.102328 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 15:03:20 crc kubenswrapper[5039]: I1124 15:03:20.101657 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 15:03:20 crc kubenswrapper[5039]: I1124 15:03:20.102360 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 15:03:50 crc kubenswrapper[5039]: I1124 15:03:50.102235 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 15:03:50 crc kubenswrapper[5039]: I1124 15:03:50.103835 5039 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 15:03:50 crc kubenswrapper[5039]: I1124 15:03:50.103920 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 15:03:50 crc kubenswrapper[5039]: I1124 15:03:50.105104 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 15:03:50 crc kubenswrapper[5039]: I1124 15:03:50.105211 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" gracePeriod=600 Nov 24 15:03:50 crc kubenswrapper[5039]: E1124 15:03:50.236741 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:03:50 crc kubenswrapper[5039]: I1124 15:03:50.507349 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" exitCode=0 Nov 24 15:03:50 crc kubenswrapper[5039]: I1124 15:03:50.507449 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b"} Nov 24 15:03:50 crc kubenswrapper[5039]: I1124 15:03:50.507536 5039 scope.go:117] "RemoveContainer" containerID="8e1b56e9a59952d23c6d0a5931bf75a97ec7b0035c65a6903040813adc8f08e3" Nov 24 15:03:50 crc kubenswrapper[5039]: I1124 15:03:50.508189 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:03:50 crc kubenswrapper[5039]: E1124 15:03:50.508488 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:04:02 crc kubenswrapper[5039]: I1124 15:04:02.307744 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:04:02 crc kubenswrapper[5039]: E1124 15:04:02.308928 5039 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:04:15 crc kubenswrapper[5039]: I1124 15:04:15.306355 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:04:15 crc kubenswrapper[5039]: E1124 15:04:15.307398 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:04:18 crc kubenswrapper[5039]: I1124 15:04:18.874916 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fx22b"] Nov 24 15:04:18 crc kubenswrapper[5039]: E1124 15:04:18.876275 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e88464ce-c201-4ce0-831a-bad31b599341" containerName="keystone-cron" Nov 24 15:04:18 crc kubenswrapper[5039]: I1124 15:04:18.876292 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="e88464ce-c201-4ce0-831a-bad31b599341" containerName="keystone-cron" Nov 24 15:04:18 crc kubenswrapper[5039]: I1124 15:04:18.876556 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="e88464ce-c201-4ce0-831a-bad31b599341" containerName="keystone-cron" Nov 24 15:04:18 crc kubenswrapper[5039]: I1124 15:04:18.878574 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fx22b" Nov 24 15:04:18 crc kubenswrapper[5039]: I1124 15:04:18.931212 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fx22b"] Nov 24 15:04:19 crc kubenswrapper[5039]: I1124 15:04:19.073985 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12028a37-6a08-4236-b36e-789d9fe793e2-utilities\") pod \"redhat-operators-fx22b\" (UID: \"12028a37-6a08-4236-b36e-789d9fe793e2\") " pod="openshift-marketplace/redhat-operators-fx22b" Nov 24 15:04:19 crc kubenswrapper[5039]: I1124 15:04:19.074058 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12028a37-6a08-4236-b36e-789d9fe793e2-catalog-content\") pod \"redhat-operators-fx22b\" (UID: \"12028a37-6a08-4236-b36e-789d9fe793e2\") " pod="openshift-marketplace/redhat-operators-fx22b" Nov 24 15:04:19 crc kubenswrapper[5039]: I1124 15:04:19.074153 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tkhl6\" (UniqueName: \"kubernetes.io/projected/12028a37-6a08-4236-b36e-789d9fe793e2-kube-api-access-tkhl6\") pod \"redhat-operators-fx22b\" (UID: \"12028a37-6a08-4236-b36e-789d9fe793e2\") " pod="openshift-marketplace/redhat-operators-fx22b" Nov 24 15:04:19 crc kubenswrapper[5039]: I1124 15:04:19.177130 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12028a37-6a08-4236-b36e-789d9fe793e2-utilities\") pod \"redhat-operators-fx22b\" (UID: \"12028a37-6a08-4236-b36e-789d9fe793e2\") " pod="openshift-marketplace/redhat-operators-fx22b" Nov 24 15:04:19 crc kubenswrapper[5039]: I1124 15:04:19.177887 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12028a37-6a08-4236-b36e-789d9fe793e2-catalog-content\") pod \"redhat-operators-fx22b\" (UID: \"12028a37-6a08-4236-b36e-789d9fe793e2\") " pod="openshift-marketplace/redhat-operators-fx22b" Nov 24 15:04:19 crc kubenswrapper[5039]: I1124 15:04:19.178041 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12028a37-6a08-4236-b36e-789d9fe793e2-utilities\") pod \"redhat-operators-fx22b\" (UID: \"12028a37-6a08-4236-b36e-789d9fe793e2\") " pod="openshift-marketplace/redhat-operators-fx22b" Nov 24 15:04:19 crc kubenswrapper[5039]: I1124 15:04:19.178223 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tkhl6\" (UniqueName: \"kubernetes.io/projected/12028a37-6a08-4236-b36e-789d9fe793e2-kube-api-access-tkhl6\") pod \"redhat-operators-fx22b\" (UID: \"12028a37-6a08-4236-b36e-789d9fe793e2\") " pod="openshift-marketplace/redhat-operators-fx22b" Nov 24 15:04:19 crc kubenswrapper[5039]: I1124 15:04:19.178346 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12028a37-6a08-4236-b36e-789d9fe793e2-catalog-content\") pod \"redhat-operators-fx22b\" (UID: \"12028a37-6a08-4236-b36e-789d9fe793e2\") " pod="openshift-marketplace/redhat-operators-fx22b" Nov 24 15:04:19 crc kubenswrapper[5039]: I1124 15:04:19.203459 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-tkhl6\" (UniqueName: \"kubernetes.io/projected/12028a37-6a08-4236-b36e-789d9fe793e2-kube-api-access-tkhl6\") pod \"redhat-operators-fx22b\" (UID: \"12028a37-6a08-4236-b36e-789d9fe793e2\") " pod="openshift-marketplace/redhat-operators-fx22b" Nov 24 15:04:19 crc kubenswrapper[5039]: I1124 15:04:19.216085 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fx22b" Nov 24 15:04:19 crc kubenswrapper[5039]: I1124 15:04:19.822410 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fx22b"] Nov 24 15:04:20 crc kubenswrapper[5039]: I1124 15:04:20.859105 5039 generic.go:334] "Generic (PLEG): container finished" podID="12028a37-6a08-4236-b36e-789d9fe793e2" containerID="99d5f564800239edeaf84936f0e587b4b5938379bc229d47d0dd2b363b3d7223" exitCode=0 Nov 24 15:04:20 crc kubenswrapper[5039]: I1124 15:04:20.859251 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx22b" event={"ID":"12028a37-6a08-4236-b36e-789d9fe793e2","Type":"ContainerDied","Data":"99d5f564800239edeaf84936f0e587b4b5938379bc229d47d0dd2b363b3d7223"} Nov 24 15:04:20 crc kubenswrapper[5039]: I1124 15:04:20.859428 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx22b" event={"ID":"12028a37-6a08-4236-b36e-789d9fe793e2","Type":"ContainerStarted","Data":"ae131d665f906148009b2e5bf7d363e6e801daafbae6eafdbfb49b30d357e9b5"} Nov 24 15:04:20 crc kubenswrapper[5039]: I1124 15:04:20.863015 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 15:04:22 crc kubenswrapper[5039]: I1124 15:04:22.897346 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx22b" event={"ID":"12028a37-6a08-4236-b36e-789d9fe793e2","Type":"ContainerStarted","Data":"483ca5c73e1bfc4afd648f63aab24c21684b6ef1ba264ae4e3f6293922b295ef"} Nov 24 15:04:25 crc kubenswrapper[5039]: I1124 15:04:25.927692 5039 generic.go:334] "Generic (PLEG): container finished" podID="12028a37-6a08-4236-b36e-789d9fe793e2" containerID="483ca5c73e1bfc4afd648f63aab24c21684b6ef1ba264ae4e3f6293922b295ef" exitCode=0 Nov 24 15:04:25 crc kubenswrapper[5039]: I1124 15:04:25.927770 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx22b" event={"ID":"12028a37-6a08-4236-b36e-789d9fe793e2","Type":"ContainerDied","Data":"483ca5c73e1bfc4afd648f63aab24c21684b6ef1ba264ae4e3f6293922b295ef"} Nov 24 15:04:26 crc kubenswrapper[5039]: I1124 15:04:26.939925 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx22b" event={"ID":"12028a37-6a08-4236-b36e-789d9fe793e2","Type":"ContainerStarted","Data":"b68c7804add52f7b6d92083f9630967ababe1f5c74e624b904d4c66011cbfcf5"} Nov 24 15:04:26 crc kubenswrapper[5039]: I1124 15:04:26.966809 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fx22b" podStartSLOduration=3.489961534 podStartE2EDuration="8.966669728s" podCreationTimestamp="2025-11-24 15:04:18 +0000 UTC" firstStartedPulling="2025-11-24 15:04:20.861383511 +0000 UTC m=+6373.300508011" lastFinishedPulling="2025-11-24 15:04:26.338091705 +0000 UTC m=+6378.777216205" observedRunningTime="2025-11-24 15:04:26.959301187 +0000 UTC m=+6379.398425687" watchObservedRunningTime="2025-11-24 15:04:26.966669728 +0000 UTC m=+6379.405794218" Nov 24 15:04:27 crc 
kubenswrapper[5039]: I1124 15:04:27.307656 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:04:27 crc kubenswrapper[5039]: E1124 15:04:27.308172 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:04:29 crc kubenswrapper[5039]: I1124 15:04:29.217248 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fx22b" Nov 24 15:04:29 crc kubenswrapper[5039]: I1124 15:04:29.217631 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fx22b" Nov 24 15:04:30 crc kubenswrapper[5039]: I1124 15:04:30.272937 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fx22b" podUID="12028a37-6a08-4236-b36e-789d9fe793e2" containerName="registry-server" probeResult="failure" output=< Nov 24 15:04:30 crc kubenswrapper[5039]: timeout: failed to connect service ":50051" within 1s Nov 24 15:04:30 crc kubenswrapper[5039]: > Nov 24 15:04:39 crc kubenswrapper[5039]: I1124 15:04:39.264600 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fx22b" Nov 24 15:04:39 crc kubenswrapper[5039]: I1124 15:04:39.316459 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fx22b" Nov 24 15:04:39 crc kubenswrapper[5039]: I1124 15:04:39.510939 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fx22b"] Nov 24 15:04:41 crc kubenswrapper[5039]: I1124 15:04:41.113698 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fx22b" podUID="12028a37-6a08-4236-b36e-789d9fe793e2" containerName="registry-server" containerID="cri-o://b68c7804add52f7b6d92083f9630967ababe1f5c74e624b904d4c66011cbfcf5" gracePeriod=2 Nov 24 15:04:41 crc kubenswrapper[5039]: I1124 15:04:41.760396 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fx22b" Nov 24 15:04:41 crc kubenswrapper[5039]: I1124 15:04:41.890201 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12028a37-6a08-4236-b36e-789d9fe793e2-utilities\") pod \"12028a37-6a08-4236-b36e-789d9fe793e2\" (UID: \"12028a37-6a08-4236-b36e-789d9fe793e2\") " Nov 24 15:04:41 crc kubenswrapper[5039]: I1124 15:04:41.890563 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tkhl6\" (UniqueName: \"kubernetes.io/projected/12028a37-6a08-4236-b36e-789d9fe793e2-kube-api-access-tkhl6\") pod \"12028a37-6a08-4236-b36e-789d9fe793e2\" (UID: \"12028a37-6a08-4236-b36e-789d9fe793e2\") " Nov 24 15:04:41 crc kubenswrapper[5039]: I1124 15:04:41.890700 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12028a37-6a08-4236-b36e-789d9fe793e2-catalog-content\") pod \"12028a37-6a08-4236-b36e-789d9fe793e2\" (UID: \"12028a37-6a08-4236-b36e-789d9fe793e2\") " Nov 24 15:04:41 crc kubenswrapper[5039]: I1124 15:04:41.891091 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12028a37-6a08-4236-b36e-789d9fe793e2-utilities" (OuterVolumeSpecName: "utilities") pod "12028a37-6a08-4236-b36e-789d9fe793e2" (UID: "12028a37-6a08-4236-b36e-789d9fe793e2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 15:04:41 crc kubenswrapper[5039]: I1124 15:04:41.891412 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12028a37-6a08-4236-b36e-789d9fe793e2-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 15:04:41 crc kubenswrapper[5039]: I1124 15:04:41.899180 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12028a37-6a08-4236-b36e-789d9fe793e2-kube-api-access-tkhl6" (OuterVolumeSpecName: "kube-api-access-tkhl6") pod "12028a37-6a08-4236-b36e-789d9fe793e2" (UID: "12028a37-6a08-4236-b36e-789d9fe793e2"). InnerVolumeSpecName "kube-api-access-tkhl6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 15:04:41 crc kubenswrapper[5039]: I1124 15:04:41.990886 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12028a37-6a08-4236-b36e-789d9fe793e2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "12028a37-6a08-4236-b36e-789d9fe793e2" (UID: "12028a37-6a08-4236-b36e-789d9fe793e2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 15:04:41 crc kubenswrapper[5039]: I1124 15:04:41.993750 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tkhl6\" (UniqueName: \"kubernetes.io/projected/12028a37-6a08-4236-b36e-789d9fe793e2-kube-api-access-tkhl6\") on node \"crc\" DevicePath \"\"" Nov 24 15:04:41 crc kubenswrapper[5039]: I1124 15:04:41.993795 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12028a37-6a08-4236-b36e-789d9fe793e2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 15:04:42 crc kubenswrapper[5039]: I1124 15:04:42.126860 5039 generic.go:334] "Generic (PLEG): container finished" podID="12028a37-6a08-4236-b36e-789d9fe793e2" containerID="b68c7804add52f7b6d92083f9630967ababe1f5c74e624b904d4c66011cbfcf5" exitCode=0 Nov 24 15:04:42 crc kubenswrapper[5039]: I1124 15:04:42.126910 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx22b" event={"ID":"12028a37-6a08-4236-b36e-789d9fe793e2","Type":"ContainerDied","Data":"b68c7804add52f7b6d92083f9630967ababe1f5c74e624b904d4c66011cbfcf5"} Nov 24 15:04:42 crc kubenswrapper[5039]: I1124 15:04:42.126931 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fx22b" Nov 24 15:04:42 crc kubenswrapper[5039]: I1124 15:04:42.126941 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx22b" event={"ID":"12028a37-6a08-4236-b36e-789d9fe793e2","Type":"ContainerDied","Data":"ae131d665f906148009b2e5bf7d363e6e801daafbae6eafdbfb49b30d357e9b5"} Nov 24 15:04:42 crc kubenswrapper[5039]: I1124 15:04:42.126971 5039 scope.go:117] "RemoveContainer" containerID="b68c7804add52f7b6d92083f9630967ababe1f5c74e624b904d4c66011cbfcf5" Nov 24 15:04:42 crc kubenswrapper[5039]: I1124 15:04:42.167321 5039 scope.go:117] "RemoveContainer" containerID="483ca5c73e1bfc4afd648f63aab24c21684b6ef1ba264ae4e3f6293922b295ef" Nov 24 15:04:42 crc kubenswrapper[5039]: I1124 15:04:42.168369 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fx22b"] Nov 24 15:04:42 crc kubenswrapper[5039]: I1124 15:04:42.180884 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fx22b"] Nov 24 15:04:42 crc kubenswrapper[5039]: I1124 15:04:42.192062 5039 scope.go:117] "RemoveContainer" containerID="99d5f564800239edeaf84936f0e587b4b5938379bc229d47d0dd2b363b3d7223" Nov 24 15:04:42 crc kubenswrapper[5039]: I1124 15:04:42.245127 5039 scope.go:117] "RemoveContainer" containerID="b68c7804add52f7b6d92083f9630967ababe1f5c74e624b904d4c66011cbfcf5" Nov 24 15:04:42 crc kubenswrapper[5039]: E1124 15:04:42.245546 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b68c7804add52f7b6d92083f9630967ababe1f5c74e624b904d4c66011cbfcf5\": container with ID starting with b68c7804add52f7b6d92083f9630967ababe1f5c74e624b904d4c66011cbfcf5 not found: ID does not exist" containerID="b68c7804add52f7b6d92083f9630967ababe1f5c74e624b904d4c66011cbfcf5" Nov 24 15:04:42 crc kubenswrapper[5039]: I1124 15:04:42.245593 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b68c7804add52f7b6d92083f9630967ababe1f5c74e624b904d4c66011cbfcf5"} err="failed to get container status \"b68c7804add52f7b6d92083f9630967ababe1f5c74e624b904d4c66011cbfcf5\": 
rpc error: code = NotFound desc = could not find container \"b68c7804add52f7b6d92083f9630967ababe1f5c74e624b904d4c66011cbfcf5\": container with ID starting with b68c7804add52f7b6d92083f9630967ababe1f5c74e624b904d4c66011cbfcf5 not found: ID does not exist" Nov 24 15:04:42 crc kubenswrapper[5039]: I1124 15:04:42.245623 5039 scope.go:117] "RemoveContainer" containerID="483ca5c73e1bfc4afd648f63aab24c21684b6ef1ba264ae4e3f6293922b295ef" Nov 24 15:04:42 crc kubenswrapper[5039]: E1124 15:04:42.245961 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"483ca5c73e1bfc4afd648f63aab24c21684b6ef1ba264ae4e3f6293922b295ef\": container with ID starting with 483ca5c73e1bfc4afd648f63aab24c21684b6ef1ba264ae4e3f6293922b295ef not found: ID does not exist" containerID="483ca5c73e1bfc4afd648f63aab24c21684b6ef1ba264ae4e3f6293922b295ef" Nov 24 15:04:42 crc kubenswrapper[5039]: I1124 15:04:42.246015 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"483ca5c73e1bfc4afd648f63aab24c21684b6ef1ba264ae4e3f6293922b295ef"} err="failed to get container status \"483ca5c73e1bfc4afd648f63aab24c21684b6ef1ba264ae4e3f6293922b295ef\": rpc error: code = NotFound desc = could not find container \"483ca5c73e1bfc4afd648f63aab24c21684b6ef1ba264ae4e3f6293922b295ef\": container with ID starting with 483ca5c73e1bfc4afd648f63aab24c21684b6ef1ba264ae4e3f6293922b295ef not found: ID does not exist" Nov 24 15:04:42 crc kubenswrapper[5039]: I1124 15:04:42.246050 5039 scope.go:117] "RemoveContainer" containerID="99d5f564800239edeaf84936f0e587b4b5938379bc229d47d0dd2b363b3d7223" Nov 24 15:04:42 crc kubenswrapper[5039]: E1124 15:04:42.246370 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99d5f564800239edeaf84936f0e587b4b5938379bc229d47d0dd2b363b3d7223\": container with ID starting with 99d5f564800239edeaf84936f0e587b4b5938379bc229d47d0dd2b363b3d7223 not found: ID does not exist" containerID="99d5f564800239edeaf84936f0e587b4b5938379bc229d47d0dd2b363b3d7223" Nov 24 15:04:42 crc kubenswrapper[5039]: I1124 15:04:42.246407 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99d5f564800239edeaf84936f0e587b4b5938379bc229d47d0dd2b363b3d7223"} err="failed to get container status \"99d5f564800239edeaf84936f0e587b4b5938379bc229d47d0dd2b363b3d7223\": rpc error: code = NotFound desc = could not find container \"99d5f564800239edeaf84936f0e587b4b5938379bc229d47d0dd2b363b3d7223\": container with ID starting with 99d5f564800239edeaf84936f0e587b4b5938379bc229d47d0dd2b363b3d7223 not found: ID does not exist" Nov 24 15:04:42 crc kubenswrapper[5039]: I1124 15:04:42.308434 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:04:42 crc kubenswrapper[5039]: E1124 15:04:42.308928 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:04:42 crc kubenswrapper[5039]: I1124 15:04:42.320327 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="12028a37-6a08-4236-b36e-789d9fe793e2" path="/var/lib/kubelet/pods/12028a37-6a08-4236-b36e-789d9fe793e2/volumes" Nov 24 15:04:57 crc kubenswrapper[5039]: I1124 15:04:57.307657 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:04:57 crc kubenswrapper[5039]: E1124 15:04:57.308525 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:05:12 crc kubenswrapper[5039]: I1124 15:05:12.307237 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:05:12 crc kubenswrapper[5039]: E1124 15:05:12.308162 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:05:25 crc kubenswrapper[5039]: I1124 15:05:25.308105 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:05:25 crc kubenswrapper[5039]: E1124 15:05:25.309306 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:05:40 crc kubenswrapper[5039]: I1124 15:05:40.307952 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:05:40 crc kubenswrapper[5039]: E1124 15:05:40.308731 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:05:53 crc kubenswrapper[5039]: I1124 15:05:53.307757 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:05:53 crc kubenswrapper[5039]: E1124 15:05:53.308651 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:06:05 crc kubenswrapper[5039]: I1124 15:06:05.307321 5039 
scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:06:05 crc kubenswrapper[5039]: E1124 15:06:05.308252 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:06:17 crc kubenswrapper[5039]: I1124 15:06:17.306948 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:06:17 crc kubenswrapper[5039]: E1124 15:06:17.307941 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:06:29 crc kubenswrapper[5039]: I1124 15:06:29.307117 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:06:29 crc kubenswrapper[5039]: E1124 15:06:29.307928 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:06:42 crc kubenswrapper[5039]: I1124 15:06:42.309031 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:06:42 crc kubenswrapper[5039]: E1124 15:06:42.310122 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:06:57 crc kubenswrapper[5039]: I1124 15:06:57.308798 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:06:57 crc kubenswrapper[5039]: E1124 15:06:57.309779 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:07:11 crc kubenswrapper[5039]: I1124 15:07:11.306931 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:07:11 crc kubenswrapper[5039]: E1124 15:07:11.307907 5039 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:07:22 crc kubenswrapper[5039]: I1124 15:07:22.425096 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tjf8g"] Nov 24 15:07:22 crc kubenswrapper[5039]: E1124 15:07:22.426379 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12028a37-6a08-4236-b36e-789d9fe793e2" containerName="extract-content" Nov 24 15:07:22 crc kubenswrapper[5039]: I1124 15:07:22.426401 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="12028a37-6a08-4236-b36e-789d9fe793e2" containerName="extract-content" Nov 24 15:07:22 crc kubenswrapper[5039]: E1124 15:07:22.426431 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12028a37-6a08-4236-b36e-789d9fe793e2" containerName="extract-utilities" Nov 24 15:07:22 crc kubenswrapper[5039]: I1124 15:07:22.426445 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="12028a37-6a08-4236-b36e-789d9fe793e2" containerName="extract-utilities" Nov 24 15:07:22 crc kubenswrapper[5039]: E1124 15:07:22.426546 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12028a37-6a08-4236-b36e-789d9fe793e2" containerName="registry-server" Nov 24 15:07:22 crc kubenswrapper[5039]: I1124 15:07:22.426559 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="12028a37-6a08-4236-b36e-789d9fe793e2" containerName="registry-server" Nov 24 15:07:22 crc kubenswrapper[5039]: I1124 15:07:22.426932 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="12028a37-6a08-4236-b36e-789d9fe793e2" containerName="registry-server" Nov 24 15:07:22 crc kubenswrapper[5039]: I1124 15:07:22.429326 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tjf8g" Nov 24 15:07:22 crc kubenswrapper[5039]: I1124 15:07:22.457983 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tjf8g"] Nov 24 15:07:22 crc kubenswrapper[5039]: I1124 15:07:22.550395 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/941a61a5-df35-49a4-a864-07f3008717fc-utilities\") pod \"redhat-marketplace-tjf8g\" (UID: \"941a61a5-df35-49a4-a864-07f3008717fc\") " pod="openshift-marketplace/redhat-marketplace-tjf8g" Nov 24 15:07:22 crc kubenswrapper[5039]: I1124 15:07:22.550577 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qf8j4\" (UniqueName: \"kubernetes.io/projected/941a61a5-df35-49a4-a864-07f3008717fc-kube-api-access-qf8j4\") pod \"redhat-marketplace-tjf8g\" (UID: \"941a61a5-df35-49a4-a864-07f3008717fc\") " pod="openshift-marketplace/redhat-marketplace-tjf8g" Nov 24 15:07:22 crc kubenswrapper[5039]: I1124 15:07:22.550606 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/941a61a5-df35-49a4-a864-07f3008717fc-catalog-content\") pod \"redhat-marketplace-tjf8g\" (UID: \"941a61a5-df35-49a4-a864-07f3008717fc\") " pod="openshift-marketplace/redhat-marketplace-tjf8g" Nov 24 15:07:22 crc kubenswrapper[5039]: I1124 15:07:22.653061 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/941a61a5-df35-49a4-a864-07f3008717fc-utilities\") pod \"redhat-marketplace-tjf8g\" (UID: \"941a61a5-df35-49a4-a864-07f3008717fc\") " pod="openshift-marketplace/redhat-marketplace-tjf8g" Nov 24 15:07:22 crc kubenswrapper[5039]: I1124 15:07:22.653215 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qf8j4\" (UniqueName: \"kubernetes.io/projected/941a61a5-df35-49a4-a864-07f3008717fc-kube-api-access-qf8j4\") pod \"redhat-marketplace-tjf8g\" (UID: \"941a61a5-df35-49a4-a864-07f3008717fc\") " pod="openshift-marketplace/redhat-marketplace-tjf8g" Nov 24 15:07:22 crc kubenswrapper[5039]: I1124 15:07:22.653251 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/941a61a5-df35-49a4-a864-07f3008717fc-catalog-content\") pod \"redhat-marketplace-tjf8g\" (UID: \"941a61a5-df35-49a4-a864-07f3008717fc\") " pod="openshift-marketplace/redhat-marketplace-tjf8g" Nov 24 15:07:22 crc kubenswrapper[5039]: I1124 15:07:22.653580 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/941a61a5-df35-49a4-a864-07f3008717fc-utilities\") pod \"redhat-marketplace-tjf8g\" (UID: \"941a61a5-df35-49a4-a864-07f3008717fc\") " pod="openshift-marketplace/redhat-marketplace-tjf8g" Nov 24 15:07:22 crc kubenswrapper[5039]: I1124 15:07:22.653774 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/941a61a5-df35-49a4-a864-07f3008717fc-catalog-content\") pod \"redhat-marketplace-tjf8g\" (UID: \"941a61a5-df35-49a4-a864-07f3008717fc\") " pod="openshift-marketplace/redhat-marketplace-tjf8g" Nov 24 15:07:22 crc kubenswrapper[5039]: I1124 15:07:22.676694 5039 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-qf8j4\" (UniqueName: \"kubernetes.io/projected/941a61a5-df35-49a4-a864-07f3008717fc-kube-api-access-qf8j4\") pod \"redhat-marketplace-tjf8g\" (UID: \"941a61a5-df35-49a4-a864-07f3008717fc\") " pod="openshift-marketplace/redhat-marketplace-tjf8g" Nov 24 15:07:22 crc kubenswrapper[5039]: I1124 15:07:22.752306 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tjf8g" Nov 24 15:07:23 crc kubenswrapper[5039]: I1124 15:07:23.259346 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tjf8g"] Nov 24 15:07:23 crc kubenswrapper[5039]: I1124 15:07:23.342162 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tjf8g" event={"ID":"941a61a5-df35-49a4-a864-07f3008717fc","Type":"ContainerStarted","Data":"a10f3d322287cf7a7990003f0b05d4a1a5b666b85eebb94a9e28bfd633c50180"} Nov 24 15:07:24 crc kubenswrapper[5039]: I1124 15:07:24.370927 5039 generic.go:334] "Generic (PLEG): container finished" podID="941a61a5-df35-49a4-a864-07f3008717fc" containerID="251fc5ad3068a7193642ca45aaeb9b109628f0df6dac096825c9815222a265f1" exitCode=0 Nov 24 15:07:24 crc kubenswrapper[5039]: I1124 15:07:24.371049 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tjf8g" event={"ID":"941a61a5-df35-49a4-a864-07f3008717fc","Type":"ContainerDied","Data":"251fc5ad3068a7193642ca45aaeb9b109628f0df6dac096825c9815222a265f1"} Nov 24 15:07:25 crc kubenswrapper[5039]: I1124 15:07:25.307769 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:07:25 crc kubenswrapper[5039]: E1124 15:07:25.308643 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:07:26 crc kubenswrapper[5039]: I1124 15:07:26.392486 5039 generic.go:334] "Generic (PLEG): container finished" podID="941a61a5-df35-49a4-a864-07f3008717fc" containerID="956ab7df70e3a455fc8e8f47a17b43ff7a8ee81bf0665daf9f9bcc65479c8765" exitCode=0 Nov 24 15:07:26 crc kubenswrapper[5039]: I1124 15:07:26.392565 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tjf8g" event={"ID":"941a61a5-df35-49a4-a864-07f3008717fc","Type":"ContainerDied","Data":"956ab7df70e3a455fc8e8f47a17b43ff7a8ee81bf0665daf9f9bcc65479c8765"} Nov 24 15:07:27 crc kubenswrapper[5039]: I1124 15:07:27.406186 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tjf8g" event={"ID":"941a61a5-df35-49a4-a864-07f3008717fc","Type":"ContainerStarted","Data":"f21279e494cbc83a533f48ea1eee9d16f2e1508d3ed877fe6810830dbe58670a"} Nov 24 15:07:27 crc kubenswrapper[5039]: I1124 15:07:27.441258 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-tjf8g" podStartSLOduration=3.011369265 podStartE2EDuration="5.441231457s" podCreationTimestamp="2025-11-24 15:07:22 +0000 UTC" firstStartedPulling="2025-11-24 15:07:24.377897323 +0000 UTC m=+6556.817021833" lastFinishedPulling="2025-11-24 
15:07:26.807759525 +0000 UTC m=+6559.246884025" observedRunningTime="2025-11-24 15:07:27.433072778 +0000 UTC m=+6559.872197288" watchObservedRunningTime="2025-11-24 15:07:27.441231457 +0000 UTC m=+6559.880355957" Nov 24 15:07:32 crc kubenswrapper[5039]: I1124 15:07:32.753049 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-tjf8g" Nov 24 15:07:32 crc kubenswrapper[5039]: I1124 15:07:32.753541 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-tjf8g" Nov 24 15:07:32 crc kubenswrapper[5039]: I1124 15:07:32.837786 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-tjf8g" Nov 24 15:07:33 crc kubenswrapper[5039]: I1124 15:07:33.570646 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-tjf8g" Nov 24 15:07:33 crc kubenswrapper[5039]: I1124 15:07:33.647849 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tjf8g"] Nov 24 15:07:35 crc kubenswrapper[5039]: I1124 15:07:35.501613 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-tjf8g" podUID="941a61a5-df35-49a4-a864-07f3008717fc" containerName="registry-server" containerID="cri-o://f21279e494cbc83a533f48ea1eee9d16f2e1508d3ed877fe6810830dbe58670a" gracePeriod=2 Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.011778 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tjf8g" Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.089218 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qf8j4\" (UniqueName: \"kubernetes.io/projected/941a61a5-df35-49a4-a864-07f3008717fc-kube-api-access-qf8j4\") pod \"941a61a5-df35-49a4-a864-07f3008717fc\" (UID: \"941a61a5-df35-49a4-a864-07f3008717fc\") " Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.089266 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/941a61a5-df35-49a4-a864-07f3008717fc-catalog-content\") pod \"941a61a5-df35-49a4-a864-07f3008717fc\" (UID: \"941a61a5-df35-49a4-a864-07f3008717fc\") " Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.089458 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/941a61a5-df35-49a4-a864-07f3008717fc-utilities\") pod \"941a61a5-df35-49a4-a864-07f3008717fc\" (UID: \"941a61a5-df35-49a4-a864-07f3008717fc\") " Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.090219 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/941a61a5-df35-49a4-a864-07f3008717fc-utilities" (OuterVolumeSpecName: "utilities") pod "941a61a5-df35-49a4-a864-07f3008717fc" (UID: "941a61a5-df35-49a4-a864-07f3008717fc"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.095349 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/941a61a5-df35-49a4-a864-07f3008717fc-kube-api-access-qf8j4" (OuterVolumeSpecName: "kube-api-access-qf8j4") pod "941a61a5-df35-49a4-a864-07f3008717fc" (UID: "941a61a5-df35-49a4-a864-07f3008717fc"). InnerVolumeSpecName "kube-api-access-qf8j4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.126249 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/941a61a5-df35-49a4-a864-07f3008717fc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "941a61a5-df35-49a4-a864-07f3008717fc" (UID: "941a61a5-df35-49a4-a864-07f3008717fc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.192518 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/941a61a5-df35-49a4-a864-07f3008717fc-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.192557 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qf8j4\" (UniqueName: \"kubernetes.io/projected/941a61a5-df35-49a4-a864-07f3008717fc-kube-api-access-qf8j4\") on node \"crc\" DevicePath \"\"" Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.192570 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/941a61a5-df35-49a4-a864-07f3008717fc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.512909 5039 generic.go:334] "Generic (PLEG): container finished" podID="941a61a5-df35-49a4-a864-07f3008717fc" containerID="f21279e494cbc83a533f48ea1eee9d16f2e1508d3ed877fe6810830dbe58670a" exitCode=0 Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.512948 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tjf8g" event={"ID":"941a61a5-df35-49a4-a864-07f3008717fc","Type":"ContainerDied","Data":"f21279e494cbc83a533f48ea1eee9d16f2e1508d3ed877fe6810830dbe58670a"} Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.512981 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tjf8g" event={"ID":"941a61a5-df35-49a4-a864-07f3008717fc","Type":"ContainerDied","Data":"a10f3d322287cf7a7990003f0b05d4a1a5b666b85eebb94a9e28bfd633c50180"} Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.512977 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tjf8g" Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.513009 5039 scope.go:117] "RemoveContainer" containerID="f21279e494cbc83a533f48ea1eee9d16f2e1508d3ed877fe6810830dbe58670a" Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.536558 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tjf8g"] Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.547883 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-tjf8g"] Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.551596 5039 scope.go:117] "RemoveContainer" containerID="956ab7df70e3a455fc8e8f47a17b43ff7a8ee81bf0665daf9f9bcc65479c8765" Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.571430 5039 scope.go:117] "RemoveContainer" containerID="251fc5ad3068a7193642ca45aaeb9b109628f0df6dac096825c9815222a265f1" Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.626404 5039 scope.go:117] "RemoveContainer" containerID="f21279e494cbc83a533f48ea1eee9d16f2e1508d3ed877fe6810830dbe58670a" Nov 24 15:07:36 crc kubenswrapper[5039]: E1124 15:07:36.626907 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f21279e494cbc83a533f48ea1eee9d16f2e1508d3ed877fe6810830dbe58670a\": container with ID starting with f21279e494cbc83a533f48ea1eee9d16f2e1508d3ed877fe6810830dbe58670a not found: ID does not exist" containerID="f21279e494cbc83a533f48ea1eee9d16f2e1508d3ed877fe6810830dbe58670a" Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.626937 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f21279e494cbc83a533f48ea1eee9d16f2e1508d3ed877fe6810830dbe58670a"} err="failed to get container status \"f21279e494cbc83a533f48ea1eee9d16f2e1508d3ed877fe6810830dbe58670a\": rpc error: code = NotFound desc = could not find container \"f21279e494cbc83a533f48ea1eee9d16f2e1508d3ed877fe6810830dbe58670a\": container with ID starting with f21279e494cbc83a533f48ea1eee9d16f2e1508d3ed877fe6810830dbe58670a not found: ID does not exist" Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.626961 5039 scope.go:117] "RemoveContainer" containerID="956ab7df70e3a455fc8e8f47a17b43ff7a8ee81bf0665daf9f9bcc65479c8765" Nov 24 15:07:36 crc kubenswrapper[5039]: E1124 15:07:36.627242 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"956ab7df70e3a455fc8e8f47a17b43ff7a8ee81bf0665daf9f9bcc65479c8765\": container with ID starting with 956ab7df70e3a455fc8e8f47a17b43ff7a8ee81bf0665daf9f9bcc65479c8765 not found: ID does not exist" containerID="956ab7df70e3a455fc8e8f47a17b43ff7a8ee81bf0665daf9f9bcc65479c8765" Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.627296 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"956ab7df70e3a455fc8e8f47a17b43ff7a8ee81bf0665daf9f9bcc65479c8765"} err="failed to get container status \"956ab7df70e3a455fc8e8f47a17b43ff7a8ee81bf0665daf9f9bcc65479c8765\": rpc error: code = NotFound desc = could not find container \"956ab7df70e3a455fc8e8f47a17b43ff7a8ee81bf0665daf9f9bcc65479c8765\": container with ID starting with 956ab7df70e3a455fc8e8f47a17b43ff7a8ee81bf0665daf9f9bcc65479c8765 not found: ID does not exist" Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.627330 5039 scope.go:117] "RemoveContainer" 
containerID="251fc5ad3068a7193642ca45aaeb9b109628f0df6dac096825c9815222a265f1" Nov 24 15:07:36 crc kubenswrapper[5039]: E1124 15:07:36.627720 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"251fc5ad3068a7193642ca45aaeb9b109628f0df6dac096825c9815222a265f1\": container with ID starting with 251fc5ad3068a7193642ca45aaeb9b109628f0df6dac096825c9815222a265f1 not found: ID does not exist" containerID="251fc5ad3068a7193642ca45aaeb9b109628f0df6dac096825c9815222a265f1" Nov 24 15:07:36 crc kubenswrapper[5039]: I1124 15:07:36.627757 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"251fc5ad3068a7193642ca45aaeb9b109628f0df6dac096825c9815222a265f1"} err="failed to get container status \"251fc5ad3068a7193642ca45aaeb9b109628f0df6dac096825c9815222a265f1\": rpc error: code = NotFound desc = could not find container \"251fc5ad3068a7193642ca45aaeb9b109628f0df6dac096825c9815222a265f1\": container with ID starting with 251fc5ad3068a7193642ca45aaeb9b109628f0df6dac096825c9815222a265f1 not found: ID does not exist" Nov 24 15:07:37 crc kubenswrapper[5039]: I1124 15:07:37.307334 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:07:37 crc kubenswrapper[5039]: E1124 15:07:37.307787 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:07:38 crc kubenswrapper[5039]: I1124 15:07:38.317896 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="941a61a5-df35-49a4-a864-07f3008717fc" path="/var/lib/kubelet/pods/941a61a5-df35-49a4-a864-07f3008717fc/volumes" Nov 24 15:07:52 crc kubenswrapper[5039]: I1124 15:07:52.308534 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:07:52 crc kubenswrapper[5039]: E1124 15:07:52.309800 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:08:03 crc kubenswrapper[5039]: I1124 15:08:03.308291 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:08:03 crc kubenswrapper[5039]: E1124 15:08:03.309334 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:08:09 crc kubenswrapper[5039]: I1124 15:08:09.157348 5039 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/certified-operators-xqj6j"] Nov 24 15:08:09 crc kubenswrapper[5039]: E1124 15:08:09.158259 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="941a61a5-df35-49a4-a864-07f3008717fc" containerName="registry-server" Nov 24 15:08:09 crc kubenswrapper[5039]: I1124 15:08:09.158274 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="941a61a5-df35-49a4-a864-07f3008717fc" containerName="registry-server" Nov 24 15:08:09 crc kubenswrapper[5039]: E1124 15:08:09.158287 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="941a61a5-df35-49a4-a864-07f3008717fc" containerName="extract-utilities" Nov 24 15:08:09 crc kubenswrapper[5039]: I1124 15:08:09.158296 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="941a61a5-df35-49a4-a864-07f3008717fc" containerName="extract-utilities" Nov 24 15:08:09 crc kubenswrapper[5039]: E1124 15:08:09.158317 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="941a61a5-df35-49a4-a864-07f3008717fc" containerName="extract-content" Nov 24 15:08:09 crc kubenswrapper[5039]: I1124 15:08:09.158324 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="941a61a5-df35-49a4-a864-07f3008717fc" containerName="extract-content" Nov 24 15:08:09 crc kubenswrapper[5039]: I1124 15:08:09.158647 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="941a61a5-df35-49a4-a864-07f3008717fc" containerName="registry-server" Nov 24 15:08:09 crc kubenswrapper[5039]: I1124 15:08:09.160622 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xqj6j" Nov 24 15:08:09 crc kubenswrapper[5039]: I1124 15:08:09.173973 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xqj6j"] Nov 24 15:08:09 crc kubenswrapper[5039]: I1124 15:08:09.301141 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgqwf\" (UniqueName: \"kubernetes.io/projected/9adafaaa-a543-480f-8c02-084b7180ad88-kube-api-access-bgqwf\") pod \"certified-operators-xqj6j\" (UID: \"9adafaaa-a543-480f-8c02-084b7180ad88\") " pod="openshift-marketplace/certified-operators-xqj6j" Nov 24 15:08:09 crc kubenswrapper[5039]: I1124 15:08:09.301195 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9adafaaa-a543-480f-8c02-084b7180ad88-catalog-content\") pod \"certified-operators-xqj6j\" (UID: \"9adafaaa-a543-480f-8c02-084b7180ad88\") " pod="openshift-marketplace/certified-operators-xqj6j" Nov 24 15:08:09 crc kubenswrapper[5039]: I1124 15:08:09.301658 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9adafaaa-a543-480f-8c02-084b7180ad88-utilities\") pod \"certified-operators-xqj6j\" (UID: \"9adafaaa-a543-480f-8c02-084b7180ad88\") " pod="openshift-marketplace/certified-operators-xqj6j" Nov 24 15:08:09 crc kubenswrapper[5039]: I1124 15:08:09.403956 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgqwf\" (UniqueName: \"kubernetes.io/projected/9adafaaa-a543-480f-8c02-084b7180ad88-kube-api-access-bgqwf\") pod \"certified-operators-xqj6j\" (UID: \"9adafaaa-a543-480f-8c02-084b7180ad88\") " pod="openshift-marketplace/certified-operators-xqj6j" Nov 24 15:08:09 crc kubenswrapper[5039]: I1124 15:08:09.404021 5039 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9adafaaa-a543-480f-8c02-084b7180ad88-catalog-content\") pod \"certified-operators-xqj6j\" (UID: \"9adafaaa-a543-480f-8c02-084b7180ad88\") " pod="openshift-marketplace/certified-operators-xqj6j" Nov 24 15:08:09 crc kubenswrapper[5039]: I1124 15:08:09.404174 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9adafaaa-a543-480f-8c02-084b7180ad88-utilities\") pod \"certified-operators-xqj6j\" (UID: \"9adafaaa-a543-480f-8c02-084b7180ad88\") " pod="openshift-marketplace/certified-operators-xqj6j" Nov 24 15:08:09 crc kubenswrapper[5039]: I1124 15:08:09.405095 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9adafaaa-a543-480f-8c02-084b7180ad88-catalog-content\") pod \"certified-operators-xqj6j\" (UID: \"9adafaaa-a543-480f-8c02-084b7180ad88\") " pod="openshift-marketplace/certified-operators-xqj6j" Nov 24 15:08:09 crc kubenswrapper[5039]: I1124 15:08:09.405220 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9adafaaa-a543-480f-8c02-084b7180ad88-utilities\") pod \"certified-operators-xqj6j\" (UID: \"9adafaaa-a543-480f-8c02-084b7180ad88\") " pod="openshift-marketplace/certified-operators-xqj6j" Nov 24 15:08:09 crc kubenswrapper[5039]: I1124 15:08:09.429018 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgqwf\" (UniqueName: \"kubernetes.io/projected/9adafaaa-a543-480f-8c02-084b7180ad88-kube-api-access-bgqwf\") pod \"certified-operators-xqj6j\" (UID: \"9adafaaa-a543-480f-8c02-084b7180ad88\") " pod="openshift-marketplace/certified-operators-xqj6j" Nov 24 15:08:09 crc kubenswrapper[5039]: I1124 15:08:09.484220 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xqj6j" Nov 24 15:08:10 crc kubenswrapper[5039]: I1124 15:08:10.084656 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xqj6j"] Nov 24 15:08:10 crc kubenswrapper[5039]: I1124 15:08:10.936029 5039 generic.go:334] "Generic (PLEG): container finished" podID="9adafaaa-a543-480f-8c02-084b7180ad88" containerID="699ad305b800b19ed96e65c4694b3afd0490648a4df00a3153fe9e06986f3486" exitCode=0 Nov 24 15:08:10 crc kubenswrapper[5039]: I1124 15:08:10.936099 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xqj6j" event={"ID":"9adafaaa-a543-480f-8c02-084b7180ad88","Type":"ContainerDied","Data":"699ad305b800b19ed96e65c4694b3afd0490648a4df00a3153fe9e06986f3486"} Nov 24 15:08:10 crc kubenswrapper[5039]: I1124 15:08:10.936384 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xqj6j" event={"ID":"9adafaaa-a543-480f-8c02-084b7180ad88","Type":"ContainerStarted","Data":"569a8debb72c9083856884468c8131c0faeaede698b34892c94f40fcc755b3d6"} Nov 24 15:08:11 crc kubenswrapper[5039]: I1124 15:08:11.946646 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xqj6j" event={"ID":"9adafaaa-a543-480f-8c02-084b7180ad88","Type":"ContainerStarted","Data":"552710a18803b3dbd1125f240a045c133c14c805a8efb9c7290459316d716c52"} Nov 24 15:08:13 crc kubenswrapper[5039]: I1124 15:08:13.967766 5039 generic.go:334] "Generic (PLEG): container finished" podID="9adafaaa-a543-480f-8c02-084b7180ad88" containerID="552710a18803b3dbd1125f240a045c133c14c805a8efb9c7290459316d716c52" exitCode=0 Nov 24 15:08:13 crc kubenswrapper[5039]: I1124 15:08:13.967834 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xqj6j" event={"ID":"9adafaaa-a543-480f-8c02-084b7180ad88","Type":"ContainerDied","Data":"552710a18803b3dbd1125f240a045c133c14c805a8efb9c7290459316d716c52"} Nov 24 15:08:14 crc kubenswrapper[5039]: I1124 15:08:14.981088 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xqj6j" event={"ID":"9adafaaa-a543-480f-8c02-084b7180ad88","Type":"ContainerStarted","Data":"162327f222bdd9a9e85df1ada135ce8acb88b16dea6106a8d25fc549b6b5e7df"} Nov 24 15:08:15 crc kubenswrapper[5039]: I1124 15:08:15.000702 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xqj6j" podStartSLOduration=2.484678298 podStartE2EDuration="6.000687304s" podCreationTimestamp="2025-11-24 15:08:09 +0000 UTC" firstStartedPulling="2025-11-24 15:08:10.938290279 +0000 UTC m=+6603.377414779" lastFinishedPulling="2025-11-24 15:08:14.454299275 +0000 UTC m=+6606.893423785" observedRunningTime="2025-11-24 15:08:14.998918 +0000 UTC m=+6607.438042500" watchObservedRunningTime="2025-11-24 15:08:15.000687304 +0000 UTC m=+6607.439811804" Nov 24 15:08:16 crc kubenswrapper[5039]: I1124 15:08:16.307581 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:08:16 crc kubenswrapper[5039]: E1124 15:08:16.308163 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:08:19 crc kubenswrapper[5039]: I1124 15:08:19.484616 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xqj6j" Nov 24 15:08:19 crc kubenswrapper[5039]: I1124 15:08:19.485089 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xqj6j" Nov 24 15:08:19 crc kubenswrapper[5039]: I1124 15:08:19.539565 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xqj6j" Nov 24 15:08:20 crc kubenswrapper[5039]: I1124 15:08:20.077276 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xqj6j" Nov 24 15:08:20 crc kubenswrapper[5039]: I1124 15:08:20.146631 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xqj6j"] Nov 24 15:08:22 crc kubenswrapper[5039]: I1124 15:08:22.050714 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xqj6j" podUID="9adafaaa-a543-480f-8c02-084b7180ad88" containerName="registry-server" containerID="cri-o://162327f222bdd9a9e85df1ada135ce8acb88b16dea6106a8d25fc549b6b5e7df" gracePeriod=2 Nov 24 15:08:22 crc kubenswrapper[5039]: I1124 15:08:22.680254 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xqj6j" Nov 24 15:08:22 crc kubenswrapper[5039]: I1124 15:08:22.857836 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9adafaaa-a543-480f-8c02-084b7180ad88-catalog-content\") pod \"9adafaaa-a543-480f-8c02-084b7180ad88\" (UID: \"9adafaaa-a543-480f-8c02-084b7180ad88\") " Nov 24 15:08:22 crc kubenswrapper[5039]: I1124 15:08:22.858142 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9adafaaa-a543-480f-8c02-084b7180ad88-utilities\") pod \"9adafaaa-a543-480f-8c02-084b7180ad88\" (UID: \"9adafaaa-a543-480f-8c02-084b7180ad88\") " Nov 24 15:08:22 crc kubenswrapper[5039]: I1124 15:08:22.858819 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9adafaaa-a543-480f-8c02-084b7180ad88-utilities" (OuterVolumeSpecName: "utilities") pod "9adafaaa-a543-480f-8c02-084b7180ad88" (UID: "9adafaaa-a543-480f-8c02-084b7180ad88"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 15:08:22 crc kubenswrapper[5039]: I1124 15:08:22.858860 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgqwf\" (UniqueName: \"kubernetes.io/projected/9adafaaa-a543-480f-8c02-084b7180ad88-kube-api-access-bgqwf\") pod \"9adafaaa-a543-480f-8c02-084b7180ad88\" (UID: \"9adafaaa-a543-480f-8c02-084b7180ad88\") " Nov 24 15:08:22 crc kubenswrapper[5039]: I1124 15:08:22.860902 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9adafaaa-a543-480f-8c02-084b7180ad88-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 15:08:22 crc kubenswrapper[5039]: I1124 15:08:22.864118 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9adafaaa-a543-480f-8c02-084b7180ad88-kube-api-access-bgqwf" (OuterVolumeSpecName: "kube-api-access-bgqwf") pod "9adafaaa-a543-480f-8c02-084b7180ad88" (UID: "9adafaaa-a543-480f-8c02-084b7180ad88"). InnerVolumeSpecName "kube-api-access-bgqwf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 15:08:22 crc kubenswrapper[5039]: I1124 15:08:22.902262 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9adafaaa-a543-480f-8c02-084b7180ad88-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9adafaaa-a543-480f-8c02-084b7180ad88" (UID: "9adafaaa-a543-480f-8c02-084b7180ad88"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 15:08:22 crc kubenswrapper[5039]: I1124 15:08:22.963234 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9adafaaa-a543-480f-8c02-084b7180ad88-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 15:08:22 crc kubenswrapper[5039]: I1124 15:08:22.963265 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgqwf\" (UniqueName: \"kubernetes.io/projected/9adafaaa-a543-480f-8c02-084b7180ad88-kube-api-access-bgqwf\") on node \"crc\" DevicePath \"\"" Nov 24 15:08:23 crc kubenswrapper[5039]: I1124 15:08:23.065497 5039 generic.go:334] "Generic (PLEG): container finished" podID="9adafaaa-a543-480f-8c02-084b7180ad88" containerID="162327f222bdd9a9e85df1ada135ce8acb88b16dea6106a8d25fc549b6b5e7df" exitCode=0 Nov 24 15:08:23 crc kubenswrapper[5039]: I1124 15:08:23.065557 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xqj6j" Nov 24 15:08:23 crc kubenswrapper[5039]: I1124 15:08:23.065562 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xqj6j" event={"ID":"9adafaaa-a543-480f-8c02-084b7180ad88","Type":"ContainerDied","Data":"162327f222bdd9a9e85df1ada135ce8acb88b16dea6106a8d25fc549b6b5e7df"} Nov 24 15:08:23 crc kubenswrapper[5039]: I1124 15:08:23.065634 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xqj6j" event={"ID":"9adafaaa-a543-480f-8c02-084b7180ad88","Type":"ContainerDied","Data":"569a8debb72c9083856884468c8131c0faeaede698b34892c94f40fcc755b3d6"} Nov 24 15:08:23 crc kubenswrapper[5039]: I1124 15:08:23.065664 5039 scope.go:117] "RemoveContainer" containerID="162327f222bdd9a9e85df1ada135ce8acb88b16dea6106a8d25fc549b6b5e7df" Nov 24 15:08:23 crc kubenswrapper[5039]: I1124 15:08:23.108303 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xqj6j"] Nov 24 15:08:23 crc kubenswrapper[5039]: I1124 15:08:23.108724 5039 scope.go:117] "RemoveContainer" containerID="552710a18803b3dbd1125f240a045c133c14c805a8efb9c7290459316d716c52" Nov 24 15:08:23 crc kubenswrapper[5039]: I1124 15:08:23.118294 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xqj6j"] Nov 24 15:08:23 crc kubenswrapper[5039]: I1124 15:08:23.138704 5039 scope.go:117] "RemoveContainer" containerID="699ad305b800b19ed96e65c4694b3afd0490648a4df00a3153fe9e06986f3486" Nov 24 15:08:23 crc kubenswrapper[5039]: I1124 15:08:23.203041 5039 scope.go:117] "RemoveContainer" containerID="162327f222bdd9a9e85df1ada135ce8acb88b16dea6106a8d25fc549b6b5e7df" Nov 24 15:08:23 crc kubenswrapper[5039]: E1124 15:08:23.203653 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"162327f222bdd9a9e85df1ada135ce8acb88b16dea6106a8d25fc549b6b5e7df\": container with ID starting with 162327f222bdd9a9e85df1ada135ce8acb88b16dea6106a8d25fc549b6b5e7df not found: ID does not exist" containerID="162327f222bdd9a9e85df1ada135ce8acb88b16dea6106a8d25fc549b6b5e7df" Nov 24 15:08:23 crc kubenswrapper[5039]: I1124 15:08:23.203714 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"162327f222bdd9a9e85df1ada135ce8acb88b16dea6106a8d25fc549b6b5e7df"} err="failed to get container status \"162327f222bdd9a9e85df1ada135ce8acb88b16dea6106a8d25fc549b6b5e7df\": rpc error: code = NotFound desc = could not find container \"162327f222bdd9a9e85df1ada135ce8acb88b16dea6106a8d25fc549b6b5e7df\": container with ID starting with 162327f222bdd9a9e85df1ada135ce8acb88b16dea6106a8d25fc549b6b5e7df not found: ID does not exist" Nov 24 15:08:23 crc kubenswrapper[5039]: I1124 15:08:23.203742 5039 scope.go:117] "RemoveContainer" containerID="552710a18803b3dbd1125f240a045c133c14c805a8efb9c7290459316d716c52" Nov 24 15:08:23 crc kubenswrapper[5039]: E1124 15:08:23.204190 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"552710a18803b3dbd1125f240a045c133c14c805a8efb9c7290459316d716c52\": container with ID starting with 552710a18803b3dbd1125f240a045c133c14c805a8efb9c7290459316d716c52 not found: ID does not exist" containerID="552710a18803b3dbd1125f240a045c133c14c805a8efb9c7290459316d716c52" Nov 24 15:08:23 crc kubenswrapper[5039]: I1124 15:08:23.204234 5039 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"552710a18803b3dbd1125f240a045c133c14c805a8efb9c7290459316d716c52"} err="failed to get container status \"552710a18803b3dbd1125f240a045c133c14c805a8efb9c7290459316d716c52\": rpc error: code = NotFound desc = could not find container \"552710a18803b3dbd1125f240a045c133c14c805a8efb9c7290459316d716c52\": container with ID starting with 552710a18803b3dbd1125f240a045c133c14c805a8efb9c7290459316d716c52 not found: ID does not exist" Nov 24 15:08:23 crc kubenswrapper[5039]: I1124 15:08:23.204264 5039 scope.go:117] "RemoveContainer" containerID="699ad305b800b19ed96e65c4694b3afd0490648a4df00a3153fe9e06986f3486" Nov 24 15:08:23 crc kubenswrapper[5039]: E1124 15:08:23.204661 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"699ad305b800b19ed96e65c4694b3afd0490648a4df00a3153fe9e06986f3486\": container with ID starting with 699ad305b800b19ed96e65c4694b3afd0490648a4df00a3153fe9e06986f3486 not found: ID does not exist" containerID="699ad305b800b19ed96e65c4694b3afd0490648a4df00a3153fe9e06986f3486" Nov 24 15:08:23 crc kubenswrapper[5039]: I1124 15:08:23.204689 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"699ad305b800b19ed96e65c4694b3afd0490648a4df00a3153fe9e06986f3486"} err="failed to get container status \"699ad305b800b19ed96e65c4694b3afd0490648a4df00a3153fe9e06986f3486\": rpc error: code = NotFound desc = could not find container \"699ad305b800b19ed96e65c4694b3afd0490648a4df00a3153fe9e06986f3486\": container with ID starting with 699ad305b800b19ed96e65c4694b3afd0490648a4df00a3153fe9e06986f3486 not found: ID does not exist" Nov 24 15:08:24 crc kubenswrapper[5039]: I1124 15:08:24.321238 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9adafaaa-a543-480f-8c02-084b7180ad88" path="/var/lib/kubelet/pods/9adafaaa-a543-480f-8c02-084b7180ad88/volumes" Nov 24 15:08:31 crc kubenswrapper[5039]: I1124 15:08:31.306567 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:08:31 crc kubenswrapper[5039]: E1124 15:08:31.307585 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:08:46 crc kubenswrapper[5039]: I1124 15:08:46.306909 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:08:46 crc kubenswrapper[5039]: E1124 15:08:46.308008 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:08:59 crc kubenswrapper[5039]: I1124 15:08:59.307419 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b" Nov 24 15:09:00 crc 
kubenswrapper[5039]: I1124 15:09:00.496885 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"a211a9c937d2afbfe7c6573e7ca039a9699e3651b4e18a07e35b912a8d12c59e"} Nov 24 15:09:29 crc kubenswrapper[5039]: I1124 15:09:29.770322 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fftdl"] Nov 24 15:09:29 crc kubenswrapper[5039]: E1124 15:09:29.771336 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9adafaaa-a543-480f-8c02-084b7180ad88" containerName="extract-utilities" Nov 24 15:09:29 crc kubenswrapper[5039]: I1124 15:09:29.771351 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="9adafaaa-a543-480f-8c02-084b7180ad88" containerName="extract-utilities" Nov 24 15:09:29 crc kubenswrapper[5039]: E1124 15:09:29.771373 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9adafaaa-a543-480f-8c02-084b7180ad88" containerName="extract-content" Nov 24 15:09:29 crc kubenswrapper[5039]: I1124 15:09:29.771381 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="9adafaaa-a543-480f-8c02-084b7180ad88" containerName="extract-content" Nov 24 15:09:29 crc kubenswrapper[5039]: E1124 15:09:29.771389 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9adafaaa-a543-480f-8c02-084b7180ad88" containerName="registry-server" Nov 24 15:09:29 crc kubenswrapper[5039]: I1124 15:09:29.771397 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="9adafaaa-a543-480f-8c02-084b7180ad88" containerName="registry-server" Nov 24 15:09:29 crc kubenswrapper[5039]: I1124 15:09:29.771628 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="9adafaaa-a543-480f-8c02-084b7180ad88" containerName="registry-server" Nov 24 15:09:29 crc kubenswrapper[5039]: I1124 15:09:29.773323 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fftdl" Nov 24 15:09:29 crc kubenswrapper[5039]: I1124 15:09:29.784419 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fftdl"] Nov 24 15:09:29 crc kubenswrapper[5039]: I1124 15:09:29.864596 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8815c7e7-4501-4801-ab6c-9636448833c8-utilities\") pod \"community-operators-fftdl\" (UID: \"8815c7e7-4501-4801-ab6c-9636448833c8\") " pod="openshift-marketplace/community-operators-fftdl" Nov 24 15:09:29 crc kubenswrapper[5039]: I1124 15:09:29.864743 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7jkh\" (UniqueName: \"kubernetes.io/projected/8815c7e7-4501-4801-ab6c-9636448833c8-kube-api-access-z7jkh\") pod \"community-operators-fftdl\" (UID: \"8815c7e7-4501-4801-ab6c-9636448833c8\") " pod="openshift-marketplace/community-operators-fftdl" Nov 24 15:09:29 crc kubenswrapper[5039]: I1124 15:09:29.864773 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8815c7e7-4501-4801-ab6c-9636448833c8-catalog-content\") pod \"community-operators-fftdl\" (UID: \"8815c7e7-4501-4801-ab6c-9636448833c8\") " pod="openshift-marketplace/community-operators-fftdl" Nov 24 15:09:29 crc kubenswrapper[5039]: I1124 15:09:29.967154 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8815c7e7-4501-4801-ab6c-9636448833c8-utilities\") pod \"community-operators-fftdl\" (UID: \"8815c7e7-4501-4801-ab6c-9636448833c8\") " pod="openshift-marketplace/community-operators-fftdl" Nov 24 15:09:29 crc kubenswrapper[5039]: I1124 15:09:29.967281 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7jkh\" (UniqueName: \"kubernetes.io/projected/8815c7e7-4501-4801-ab6c-9636448833c8-kube-api-access-z7jkh\") pod \"community-operators-fftdl\" (UID: \"8815c7e7-4501-4801-ab6c-9636448833c8\") " pod="openshift-marketplace/community-operators-fftdl" Nov 24 15:09:29 crc kubenswrapper[5039]: I1124 15:09:29.967310 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8815c7e7-4501-4801-ab6c-9636448833c8-catalog-content\") pod \"community-operators-fftdl\" (UID: \"8815c7e7-4501-4801-ab6c-9636448833c8\") " pod="openshift-marketplace/community-operators-fftdl" Nov 24 15:09:29 crc kubenswrapper[5039]: I1124 15:09:29.967765 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8815c7e7-4501-4801-ab6c-9636448833c8-catalog-content\") pod \"community-operators-fftdl\" (UID: \"8815c7e7-4501-4801-ab6c-9636448833c8\") " pod="openshift-marketplace/community-operators-fftdl" Nov 24 15:09:29 crc kubenswrapper[5039]: I1124 15:09:29.968178 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8815c7e7-4501-4801-ab6c-9636448833c8-utilities\") pod \"community-operators-fftdl\" (UID: \"8815c7e7-4501-4801-ab6c-9636448833c8\") " pod="openshift-marketplace/community-operators-fftdl" Nov 24 15:09:29 crc kubenswrapper[5039]: I1124 15:09:29.994431 5039 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-z7jkh\" (UniqueName: \"kubernetes.io/projected/8815c7e7-4501-4801-ab6c-9636448833c8-kube-api-access-z7jkh\") pod \"community-operators-fftdl\" (UID: \"8815c7e7-4501-4801-ab6c-9636448833c8\") " pod="openshift-marketplace/community-operators-fftdl" Nov 24 15:09:30 crc kubenswrapper[5039]: I1124 15:09:30.096607 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fftdl" Nov 24 15:09:30 crc kubenswrapper[5039]: I1124 15:09:30.662375 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fftdl"] Nov 24 15:09:30 crc kubenswrapper[5039]: I1124 15:09:30.847399 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fftdl" event={"ID":"8815c7e7-4501-4801-ab6c-9636448833c8","Type":"ContainerStarted","Data":"e9cf817bfb7be62c569a645afaaa3d5797e2c0531b41607fcd9c9b6954110b0c"} Nov 24 15:09:31 crc kubenswrapper[5039]: I1124 15:09:31.864936 5039 generic.go:334] "Generic (PLEG): container finished" podID="8815c7e7-4501-4801-ab6c-9636448833c8" containerID="1c232dad8d1fc400149c9aeeac7733ee87153ace3c954979e44683c9b9cb08d0" exitCode=0 Nov 24 15:09:31 crc kubenswrapper[5039]: I1124 15:09:31.864980 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fftdl" event={"ID":"8815c7e7-4501-4801-ab6c-9636448833c8","Type":"ContainerDied","Data":"1c232dad8d1fc400149c9aeeac7733ee87153ace3c954979e44683c9b9cb08d0"} Nov 24 15:09:31 crc kubenswrapper[5039]: I1124 15:09:31.867963 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 15:09:33 crc kubenswrapper[5039]: I1124 15:09:33.889610 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fftdl" event={"ID":"8815c7e7-4501-4801-ab6c-9636448833c8","Type":"ContainerStarted","Data":"3535e43ee68f880fb5e08b05cd4375634b878dfc07702eda4d1e067f5dc6a884"} Nov 24 15:09:35 crc kubenswrapper[5039]: I1124 15:09:35.912911 5039 generic.go:334] "Generic (PLEG): container finished" podID="8815c7e7-4501-4801-ab6c-9636448833c8" containerID="3535e43ee68f880fb5e08b05cd4375634b878dfc07702eda4d1e067f5dc6a884" exitCode=0 Nov 24 15:09:35 crc kubenswrapper[5039]: I1124 15:09:35.913082 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fftdl" event={"ID":"8815c7e7-4501-4801-ab6c-9636448833c8","Type":"ContainerDied","Data":"3535e43ee68f880fb5e08b05cd4375634b878dfc07702eda4d1e067f5dc6a884"} Nov 24 15:09:36 crc kubenswrapper[5039]: I1124 15:09:36.924927 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fftdl" event={"ID":"8815c7e7-4501-4801-ab6c-9636448833c8","Type":"ContainerStarted","Data":"bd02202614002da4ffba6c7beb37676a4895b5fd0306f068c9a848451a133981"} Nov 24 15:09:36 crc kubenswrapper[5039]: I1124 15:09:36.949692 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fftdl" podStartSLOduration=3.424470323 podStartE2EDuration="7.949675575s" podCreationTimestamp="2025-11-24 15:09:29 +0000 UTC" firstStartedPulling="2025-11-24 15:09:31.867522318 +0000 UTC m=+6684.306646818" lastFinishedPulling="2025-11-24 15:09:36.39272758 +0000 UTC m=+6688.831852070" observedRunningTime="2025-11-24 15:09:36.943421142 +0000 UTC m=+6689.382545642" watchObservedRunningTime="2025-11-24 
15:09:36.949675575 +0000 UTC m=+6689.388800075"
Nov 24 15:09:40 crc kubenswrapper[5039]: I1124 15:09:40.097041 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fftdl"
Nov 24 15:09:40 crc kubenswrapper[5039]: I1124 15:09:40.097386 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fftdl"
Nov 24 15:09:40 crc kubenswrapper[5039]: I1124 15:09:40.148289 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fftdl"
Nov 24 15:09:50 crc kubenswrapper[5039]: I1124 15:09:50.180713 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fftdl"
Nov 24 15:09:50 crc kubenswrapper[5039]: I1124 15:09:50.250060 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fftdl"]
Nov 24 15:09:51 crc kubenswrapper[5039]: I1124 15:09:51.098069 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fftdl" podUID="8815c7e7-4501-4801-ab6c-9636448833c8" containerName="registry-server" containerID="cri-o://bd02202614002da4ffba6c7beb37676a4895b5fd0306f068c9a848451a133981" gracePeriod=2
Nov 24 15:09:51 crc kubenswrapper[5039]: I1124 15:09:51.644222 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fftdl"
Nov 24 15:09:51 crc kubenswrapper[5039]: I1124 15:09:51.808924 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z7jkh\" (UniqueName: \"kubernetes.io/projected/8815c7e7-4501-4801-ab6c-9636448833c8-kube-api-access-z7jkh\") pod \"8815c7e7-4501-4801-ab6c-9636448833c8\" (UID: \"8815c7e7-4501-4801-ab6c-9636448833c8\") "
Nov 24 15:09:51 crc kubenswrapper[5039]: I1124 15:09:51.809337 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8815c7e7-4501-4801-ab6c-9636448833c8-utilities\") pod \"8815c7e7-4501-4801-ab6c-9636448833c8\" (UID: \"8815c7e7-4501-4801-ab6c-9636448833c8\") "
Nov 24 15:09:51 crc kubenswrapper[5039]: I1124 15:09:51.809764 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8815c7e7-4501-4801-ab6c-9636448833c8-catalog-content\") pod \"8815c7e7-4501-4801-ab6c-9636448833c8\" (UID: \"8815c7e7-4501-4801-ab6c-9636448833c8\") "
Nov 24 15:09:51 crc kubenswrapper[5039]: I1124 15:09:51.810229 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8815c7e7-4501-4801-ab6c-9636448833c8-utilities" (OuterVolumeSpecName: "utilities") pod "8815c7e7-4501-4801-ab6c-9636448833c8" (UID: "8815c7e7-4501-4801-ab6c-9636448833c8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 15:09:51 crc kubenswrapper[5039]: I1124 15:09:51.810888 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8815c7e7-4501-4801-ab6c-9636448833c8-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 15:09:51 crc kubenswrapper[5039]: I1124 15:09:51.814948 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8815c7e7-4501-4801-ab6c-9636448833c8-kube-api-access-z7jkh" (OuterVolumeSpecName: "kube-api-access-z7jkh") pod "8815c7e7-4501-4801-ab6c-9636448833c8" (UID: "8815c7e7-4501-4801-ab6c-9636448833c8"). InnerVolumeSpecName "kube-api-access-z7jkh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 15:09:51 crc kubenswrapper[5039]: I1124 15:09:51.871420 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8815c7e7-4501-4801-ab6c-9636448833c8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8815c7e7-4501-4801-ab6c-9636448833c8" (UID: "8815c7e7-4501-4801-ab6c-9636448833c8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 15:09:51 crc kubenswrapper[5039]: I1124 15:09:51.912771 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8815c7e7-4501-4801-ab6c-9636448833c8-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 15:09:51 crc kubenswrapper[5039]: I1124 15:09:51.912802 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z7jkh\" (UniqueName: \"kubernetes.io/projected/8815c7e7-4501-4801-ab6c-9636448833c8-kube-api-access-z7jkh\") on node \"crc\" DevicePath \"\""
Nov 24 15:09:52 crc kubenswrapper[5039]: I1124 15:09:52.114587 5039 generic.go:334] "Generic (PLEG): container finished" podID="8815c7e7-4501-4801-ab6c-9636448833c8" containerID="bd02202614002da4ffba6c7beb37676a4895b5fd0306f068c9a848451a133981" exitCode=0
Nov 24 15:09:52 crc kubenswrapper[5039]: I1124 15:09:52.114634 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fftdl" event={"ID":"8815c7e7-4501-4801-ab6c-9636448833c8","Type":"ContainerDied","Data":"bd02202614002da4ffba6c7beb37676a4895b5fd0306f068c9a848451a133981"}
Nov 24 15:09:52 crc kubenswrapper[5039]: I1124 15:09:52.114675 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fftdl" event={"ID":"8815c7e7-4501-4801-ab6c-9636448833c8","Type":"ContainerDied","Data":"e9cf817bfb7be62c569a645afaaa3d5797e2c0531b41607fcd9c9b6954110b0c"}
Nov 24 15:09:52 crc kubenswrapper[5039]: I1124 15:09:52.114689 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fftdl"
Nov 24 15:09:52 crc kubenswrapper[5039]: I1124 15:09:52.114773 5039 scope.go:117] "RemoveContainer" containerID="bd02202614002da4ffba6c7beb37676a4895b5fd0306f068c9a848451a133981"
Nov 24 15:09:52 crc kubenswrapper[5039]: I1124 15:09:52.151519 5039 scope.go:117] "RemoveContainer" containerID="3535e43ee68f880fb5e08b05cd4375634b878dfc07702eda4d1e067f5dc6a884"
Nov 24 15:09:52 crc kubenswrapper[5039]: I1124 15:09:52.158211 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fftdl"]
Nov 24 15:09:52 crc kubenswrapper[5039]: I1124 15:09:52.180932 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fftdl"]
Nov 24 15:09:52 crc kubenswrapper[5039]: I1124 15:09:52.188820 5039 scope.go:117] "RemoveContainer" containerID="1c232dad8d1fc400149c9aeeac7733ee87153ace3c954979e44683c9b9cb08d0"
Nov 24 15:09:52 crc kubenswrapper[5039]: I1124 15:09:52.243646 5039 scope.go:117] "RemoveContainer" containerID="bd02202614002da4ffba6c7beb37676a4895b5fd0306f068c9a848451a133981"
Nov 24 15:09:52 crc kubenswrapper[5039]: E1124 15:09:52.244140 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd02202614002da4ffba6c7beb37676a4895b5fd0306f068c9a848451a133981\": container with ID starting with bd02202614002da4ffba6c7beb37676a4895b5fd0306f068c9a848451a133981 not found: ID does not exist" containerID="bd02202614002da4ffba6c7beb37676a4895b5fd0306f068c9a848451a133981"
Nov 24 15:09:52 crc kubenswrapper[5039]: I1124 15:09:52.244186 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd02202614002da4ffba6c7beb37676a4895b5fd0306f068c9a848451a133981"} err="failed to get container status \"bd02202614002da4ffba6c7beb37676a4895b5fd0306f068c9a848451a133981\": rpc error: code = NotFound desc = could not find container \"bd02202614002da4ffba6c7beb37676a4895b5fd0306f068c9a848451a133981\": container with ID starting with bd02202614002da4ffba6c7beb37676a4895b5fd0306f068c9a848451a133981 not found: ID does not exist"
Nov 24 15:09:52 crc kubenswrapper[5039]: I1124 15:09:52.244206 5039 scope.go:117] "RemoveContainer" containerID="3535e43ee68f880fb5e08b05cd4375634b878dfc07702eda4d1e067f5dc6a884"
Nov 24 15:09:52 crc kubenswrapper[5039]: E1124 15:09:52.244798 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3535e43ee68f880fb5e08b05cd4375634b878dfc07702eda4d1e067f5dc6a884\": container with ID starting with 3535e43ee68f880fb5e08b05cd4375634b878dfc07702eda4d1e067f5dc6a884 not found: ID does not exist" containerID="3535e43ee68f880fb5e08b05cd4375634b878dfc07702eda4d1e067f5dc6a884"
Nov 24 15:09:52 crc kubenswrapper[5039]: I1124 15:09:52.244847 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3535e43ee68f880fb5e08b05cd4375634b878dfc07702eda4d1e067f5dc6a884"} err="failed to get container status \"3535e43ee68f880fb5e08b05cd4375634b878dfc07702eda4d1e067f5dc6a884\": rpc error: code = NotFound desc = could not find container \"3535e43ee68f880fb5e08b05cd4375634b878dfc07702eda4d1e067f5dc6a884\": container with ID starting with 3535e43ee68f880fb5e08b05cd4375634b878dfc07702eda4d1e067f5dc6a884 not found: ID does not exist"
Nov 24 15:09:52 crc kubenswrapper[5039]: I1124 15:09:52.244880 5039 scope.go:117] "RemoveContainer" containerID="1c232dad8d1fc400149c9aeeac7733ee87153ace3c954979e44683c9b9cb08d0"
Nov 24 15:09:52 crc kubenswrapper[5039]: E1124 15:09:52.245350 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c232dad8d1fc400149c9aeeac7733ee87153ace3c954979e44683c9b9cb08d0\": container with ID starting with 1c232dad8d1fc400149c9aeeac7733ee87153ace3c954979e44683c9b9cb08d0 not found: ID does not exist" containerID="1c232dad8d1fc400149c9aeeac7733ee87153ace3c954979e44683c9b9cb08d0"
Nov 24 15:09:52 crc kubenswrapper[5039]: I1124 15:09:52.245382 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c232dad8d1fc400149c9aeeac7733ee87153ace3c954979e44683c9b9cb08d0"} err="failed to get container status \"1c232dad8d1fc400149c9aeeac7733ee87153ace3c954979e44683c9b9cb08d0\": rpc error: code = NotFound desc = could not find container \"1c232dad8d1fc400149c9aeeac7733ee87153ace3c954979e44683c9b9cb08d0\": container with ID starting with 1c232dad8d1fc400149c9aeeac7733ee87153ace3c954979e44683c9b9cb08d0 not found: ID does not exist"
Nov 24 15:09:52 crc kubenswrapper[5039]: I1124 15:09:52.323260 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8815c7e7-4501-4801-ab6c-9636448833c8" path="/var/lib/kubelet/pods/8815c7e7-4501-4801-ab6c-9636448833c8/volumes"
Nov 24 15:11:20 crc kubenswrapper[5039]: I1124 15:11:20.101294 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 15:11:20 crc kubenswrapper[5039]: I1124 15:11:20.101914 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 15:11:50 crc kubenswrapper[5039]: I1124 15:11:50.101688 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 15:11:50 crc kubenswrapper[5039]: I1124 15:11:50.102359 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 15:12:20 crc kubenswrapper[5039]: I1124 15:12:20.102057 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 15:12:20 crc kubenswrapper[5039]: I1124 15:12:20.102796 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 15:12:20 crc kubenswrapper[5039]: I1124 15:12:20.102883 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg"
Nov 24 15:12:20 crc kubenswrapper[5039]: I1124 15:12:20.104056 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a211a9c937d2afbfe7c6573e7ca039a9699e3651b4e18a07e35b912a8d12c59e"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 24 15:12:20 crc kubenswrapper[5039]: I1124 15:12:20.104370 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://a211a9c937d2afbfe7c6573e7ca039a9699e3651b4e18a07e35b912a8d12c59e" gracePeriod=600
Nov 24 15:12:20 crc kubenswrapper[5039]: I1124 15:12:20.891443 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="a211a9c937d2afbfe7c6573e7ca039a9699e3651b4e18a07e35b912a8d12c59e" exitCode=0
Nov 24 15:12:20 crc kubenswrapper[5039]: I1124 15:12:20.891536 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"a211a9c937d2afbfe7c6573e7ca039a9699e3651b4e18a07e35b912a8d12c59e"}
Nov 24 15:12:20 crc kubenswrapper[5039]: I1124 15:12:20.891904 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3"}
Nov 24 15:12:20 crc kubenswrapper[5039]: I1124 15:12:20.891938 5039 scope.go:117] "RemoveContainer" containerID="36a018efceff2dceb2e39f7d2b6d8f2ec9c9d78aab8a277785d27bb4f412402b"
Nov 24 15:14:20 crc kubenswrapper[5039]: I1124 15:14:20.101037 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 15:14:20 crc kubenswrapper[5039]: I1124 15:14:20.101644 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 15:14:45 crc kubenswrapper[5039]: I1124 15:14:45.553968 5039 generic.go:334] "Generic (PLEG): container finished" podID="e515a4f0-d838-4d61-906b-f26a0c07f8c8" containerID="43b1e426a69ff0a81ab7c12adb7bb64c76ca32ea690412b546ffaa39920dc01c" exitCode=0
Nov 24 15:14:45 crc kubenswrapper[5039]: I1124 15:14:45.554028 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"e515a4f0-d838-4d61-906b-f26a0c07f8c8","Type":"ContainerDied","Data":"43b1e426a69ff0a81ab7c12adb7bb64c76ca32ea690412b546ffaa39920dc01c"}
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.022851 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.105183 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e515a4f0-d838-4d61-906b-f26a0c07f8c8-config-data\") pod \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") "
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.105245 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") "
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.105302 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/e515a4f0-d838-4d61-906b-f26a0c07f8c8-test-operator-ephemeral-temporary\") pod \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") "
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.105375 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e515a4f0-d838-4d61-906b-f26a0c07f8c8-openstack-config-secret\") pod \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") "
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.105566 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e515a4f0-d838-4d61-906b-f26a0c07f8c8-ssh-key\") pod \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") "
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.106728 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfmsh\" (UniqueName: \"kubernetes.io/projected/e515a4f0-d838-4d61-906b-f26a0c07f8c8-kube-api-access-cfmsh\") pod \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") "
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.106785 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/e515a4f0-d838-4d61-906b-f26a0c07f8c8-test-operator-ephemeral-workdir\") pod \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") "
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.106836 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e515a4f0-d838-4d61-906b-f26a0c07f8c8-openstack-config\") pod \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") "
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.106962 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/e515a4f0-d838-4d61-906b-f26a0c07f8c8-ca-certs\") pod \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\" (UID: \"e515a4f0-d838-4d61-906b-f26a0c07f8c8\") "
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.106997 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e515a4f0-d838-4d61-906b-f26a0c07f8c8-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "e515a4f0-d838-4d61-906b-f26a0c07f8c8" (UID: "e515a4f0-d838-4d61-906b-f26a0c07f8c8"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.107190 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e515a4f0-d838-4d61-906b-f26a0c07f8c8-config-data" (OuterVolumeSpecName: "config-data") pod "e515a4f0-d838-4d61-906b-f26a0c07f8c8" (UID: "e515a4f0-d838-4d61-906b-f26a0c07f8c8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.107710 5039 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e515a4f0-d838-4d61-906b-f26a0c07f8c8-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.107734 5039 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/e515a4f0-d838-4d61-906b-f26a0c07f8c8-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\""
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.111729 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e515a4f0-d838-4d61-906b-f26a0c07f8c8-kube-api-access-cfmsh" (OuterVolumeSpecName: "kube-api-access-cfmsh") pod "e515a4f0-d838-4d61-906b-f26a0c07f8c8" (UID: "e515a4f0-d838-4d61-906b-f26a0c07f8c8"). InnerVolumeSpecName "kube-api-access-cfmsh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.113367 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "test-operator-logs") pod "e515a4f0-d838-4d61-906b-f26a0c07f8c8" (UID: "e515a4f0-d838-4d61-906b-f26a0c07f8c8"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.116200 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e515a4f0-d838-4d61-906b-f26a0c07f8c8-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "e515a4f0-d838-4d61-906b-f26a0c07f8c8" (UID: "e515a4f0-d838-4d61-906b-f26a0c07f8c8"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.142775 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e515a4f0-d838-4d61-906b-f26a0c07f8c8-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "e515a4f0-d838-4d61-906b-f26a0c07f8c8" (UID: "e515a4f0-d838-4d61-906b-f26a0c07f8c8"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.150357 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e515a4f0-d838-4d61-906b-f26a0c07f8c8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e515a4f0-d838-4d61-906b-f26a0c07f8c8" (UID: "e515a4f0-d838-4d61-906b-f26a0c07f8c8"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.150645 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e515a4f0-d838-4d61-906b-f26a0c07f8c8-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "e515a4f0-d838-4d61-906b-f26a0c07f8c8" (UID: "e515a4f0-d838-4d61-906b-f26a0c07f8c8"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.199832 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e515a4f0-d838-4d61-906b-f26a0c07f8c8-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "e515a4f0-d838-4d61-906b-f26a0c07f8c8" (UID: "e515a4f0-d838-4d61-906b-f26a0c07f8c8"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.210886 5039 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" "
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.210920 5039 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e515a4f0-d838-4d61-906b-f26a0c07f8c8-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.210933 5039 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e515a4f0-d838-4d61-906b-f26a0c07f8c8-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.210942 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfmsh\" (UniqueName: \"kubernetes.io/projected/e515a4f0-d838-4d61-906b-f26a0c07f8c8-kube-api-access-cfmsh\") on node \"crc\" DevicePath \"\""
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.210953 5039 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/e515a4f0-d838-4d61-906b-f26a0c07f8c8-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\""
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.210965 5039 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e515a4f0-d838-4d61-906b-f26a0c07f8c8-openstack-config\") on node \"crc\" DevicePath \"\""
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.210975 5039 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/e515a4f0-d838-4d61-906b-f26a0c07f8c8-ca-certs\") on node \"crc\" DevicePath \"\""
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.240668 5039 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc"
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.313460 5039 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\""
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.579339 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"e515a4f0-d838-4d61-906b-f26a0c07f8c8","Type":"ContainerDied","Data":"698fd31c4d636106bd6c5d08af1ff15643cae9bfe3f2a0f0f509788a4c67ffc6"}
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.579387 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="698fd31c4d636106bd6c5d08af1ff15643cae9bfe3f2a0f0f509788a4c67ffc6"
Nov 24 15:14:47 crc kubenswrapper[5039]: I1124 15:14:47.579404 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Nov 24 15:14:50 crc kubenswrapper[5039]: I1124 15:14:50.102264 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 15:14:50 crc kubenswrapper[5039]: I1124 15:14:50.102890 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 15:14:51 crc kubenswrapper[5039]: I1124 15:14:51.207627 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 24 15:14:51 crc kubenswrapper[5039]: E1124 15:14:51.208432 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8815c7e7-4501-4801-ab6c-9636448833c8" containerName="extract-content"
Nov 24 15:14:51 crc kubenswrapper[5039]: I1124 15:14:51.208456 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8815c7e7-4501-4801-ab6c-9636448833c8" containerName="extract-content"
Nov 24 15:14:51 crc kubenswrapper[5039]: E1124 15:14:51.208487 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8815c7e7-4501-4801-ab6c-9636448833c8" containerName="extract-utilities"
Nov 24 15:14:51 crc kubenswrapper[5039]: I1124 15:14:51.208534 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8815c7e7-4501-4801-ab6c-9636448833c8" containerName="extract-utilities"
Nov 24 15:14:51 crc kubenswrapper[5039]: E1124 15:14:51.208565 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8815c7e7-4501-4801-ab6c-9636448833c8" containerName="registry-server"
Nov 24 15:14:51 crc kubenswrapper[5039]: I1124 15:14:51.208579 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8815c7e7-4501-4801-ab6c-9636448833c8" containerName="registry-server"
Nov 24 15:14:51 crc kubenswrapper[5039]: E1124 15:14:51.208647 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e515a4f0-d838-4d61-906b-f26a0c07f8c8" containerName="tempest-tests-tempest-tests-runner"
Nov 24 15:14:51 crc kubenswrapper[5039]: I1124 15:14:51.208661 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="e515a4f0-d838-4d61-906b-f26a0c07f8c8" containerName="tempest-tests-tempest-tests-runner"
Nov 24 15:14:51 crc kubenswrapper[5039]: I1124 15:14:51.209133 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="8815c7e7-4501-4801-ab6c-9636448833c8" containerName="registry-server"
Nov 24 15:14:51 crc kubenswrapper[5039]: I1124 15:14:51.209170 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="e515a4f0-d838-4d61-906b-f26a0c07f8c8" containerName="tempest-tests-tempest-tests-runner"
Nov 24 15:14:51 crc kubenswrapper[5039]: I1124 15:14:51.210665 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 24 15:14:51 crc kubenswrapper[5039]: I1124 15:14:51.214862 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-5cncf"
Nov 24 15:14:51 crc kubenswrapper[5039]: I1124 15:14:51.228017 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 24 15:14:51 crc kubenswrapper[5039]: I1124 15:14:51.317139 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2hg5\" (UniqueName: \"kubernetes.io/projected/2c05319a-d5d8-4585-8d73-0bc049535803-kube-api-access-q2hg5\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2c05319a-d5d8-4585-8d73-0bc049535803\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 24 15:14:51 crc kubenswrapper[5039]: I1124 15:14:51.317923 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2c05319a-d5d8-4585-8d73-0bc049535803\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 24 15:14:51 crc kubenswrapper[5039]: I1124 15:14:51.419535 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2hg5\" (UniqueName: \"kubernetes.io/projected/2c05319a-d5d8-4585-8d73-0bc049535803-kube-api-access-q2hg5\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2c05319a-d5d8-4585-8d73-0bc049535803\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 24 15:14:51 crc kubenswrapper[5039]: I1124 15:14:51.419771 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2c05319a-d5d8-4585-8d73-0bc049535803\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 24 15:14:51 crc kubenswrapper[5039]: I1124 15:14:51.420869 5039 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2c05319a-d5d8-4585-8d73-0bc049535803\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 24 15:14:51 crc kubenswrapper[5039]: I1124 15:14:51.447818 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2hg5\" (UniqueName: \"kubernetes.io/projected/2c05319a-d5d8-4585-8d73-0bc049535803-kube-api-access-q2hg5\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2c05319a-d5d8-4585-8d73-0bc049535803\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 24 15:14:51 crc kubenswrapper[5039]: I1124 15:14:51.451906 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2c05319a-d5d8-4585-8d73-0bc049535803\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 24 15:14:51 crc kubenswrapper[5039]: I1124 15:14:51.555244 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 24 15:14:52 crc kubenswrapper[5039]: I1124 15:14:52.043576 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 24 15:14:52 crc kubenswrapper[5039]: I1124 15:14:52.050022 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 24 15:14:52 crc kubenswrapper[5039]: I1124 15:14:52.637431 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"2c05319a-d5d8-4585-8d73-0bc049535803","Type":"ContainerStarted","Data":"7385e7f93eee8a67737726ecb88dece9770cdf8ed32edc485352c43fa870de02"}
Nov 24 15:14:53 crc kubenswrapper[5039]: I1124 15:14:53.648394 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"2c05319a-d5d8-4585-8d73-0bc049535803","Type":"ContainerStarted","Data":"9eaffbaf04b75bfcc2407ebf63bb66ff14ef35cdec64e078de685768c9014db8"}
Nov 24 15:14:53 crc kubenswrapper[5039]: I1124 15:14:53.667495 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=1.6720150660000002 podStartE2EDuration="2.667479481s" podCreationTimestamp="2025-11-24 15:14:51 +0000 UTC" firstStartedPulling="2025-11-24 15:14:52.049851533 +0000 UTC m=+7004.488976023" lastFinishedPulling="2025-11-24 15:14:53.045315938 +0000 UTC m=+7005.484440438" observedRunningTime="2025-11-24 15:14:53.667097872 +0000 UTC m=+7006.106222382" watchObservedRunningTime="2025-11-24 15:14:53.667479481 +0000 UTC m=+7006.106603981"
Nov 24 15:15:00 crc kubenswrapper[5039]: I1124 15:15:00.185016 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd"]
Nov 24 15:15:00 crc kubenswrapper[5039]: I1124 15:15:00.186991 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd"
Nov 24 15:15:00 crc kubenswrapper[5039]: I1124 15:15:00.197211 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 24 15:15:00 crc kubenswrapper[5039]: I1124 15:15:00.197451 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 24 15:15:00 crc kubenswrapper[5039]: I1124 15:15:00.201001 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd"]
Nov 24 15:15:00 crc kubenswrapper[5039]: I1124 15:15:00.375232 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eff0e7b0-2e95-43d6-8d7c-f58c58432732-config-volume\") pod \"collect-profiles-29399955-mlgkd\" (UID: \"eff0e7b0-2e95-43d6-8d7c-f58c58432732\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd"
Nov 24 15:15:00 crc kubenswrapper[5039]: I1124 15:15:00.375373 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tlgn9\" (UniqueName: \"kubernetes.io/projected/eff0e7b0-2e95-43d6-8d7c-f58c58432732-kube-api-access-tlgn9\") pod \"collect-profiles-29399955-mlgkd\" (UID: \"eff0e7b0-2e95-43d6-8d7c-f58c58432732\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd"
Nov 24 15:15:00 crc kubenswrapper[5039]: I1124 15:15:00.375469 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/eff0e7b0-2e95-43d6-8d7c-f58c58432732-secret-volume\") pod \"collect-profiles-29399955-mlgkd\" (UID: \"eff0e7b0-2e95-43d6-8d7c-f58c58432732\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd"
Nov 24 15:15:00 crc kubenswrapper[5039]: I1124 15:15:00.477972 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eff0e7b0-2e95-43d6-8d7c-f58c58432732-config-volume\") pod \"collect-profiles-29399955-mlgkd\" (UID: \"eff0e7b0-2e95-43d6-8d7c-f58c58432732\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd"
Nov 24 15:15:00 crc kubenswrapper[5039]: I1124 15:15:00.478030 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tlgn9\" (UniqueName: \"kubernetes.io/projected/eff0e7b0-2e95-43d6-8d7c-f58c58432732-kube-api-access-tlgn9\") pod \"collect-profiles-29399955-mlgkd\" (UID: \"eff0e7b0-2e95-43d6-8d7c-f58c58432732\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd"
Nov 24 15:15:00 crc kubenswrapper[5039]: I1124 15:15:00.478078 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/eff0e7b0-2e95-43d6-8d7c-f58c58432732-secret-volume\") pod \"collect-profiles-29399955-mlgkd\" (UID: \"eff0e7b0-2e95-43d6-8d7c-f58c58432732\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd"
Nov 24 15:15:00 crc kubenswrapper[5039]: I1124 15:15:00.480769 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eff0e7b0-2e95-43d6-8d7c-f58c58432732-config-volume\") pod \"collect-profiles-29399955-mlgkd\" (UID: \"eff0e7b0-2e95-43d6-8d7c-f58c58432732\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd"
Nov 24 15:15:00 crc kubenswrapper[5039]: I1124 15:15:00.485296 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/eff0e7b0-2e95-43d6-8d7c-f58c58432732-secret-volume\") pod \"collect-profiles-29399955-mlgkd\" (UID: \"eff0e7b0-2e95-43d6-8d7c-f58c58432732\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd"
Nov 24 15:15:00 crc kubenswrapper[5039]: I1124 15:15:00.502575 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tlgn9\" (UniqueName: \"kubernetes.io/projected/eff0e7b0-2e95-43d6-8d7c-f58c58432732-kube-api-access-tlgn9\") pod \"collect-profiles-29399955-mlgkd\" (UID: \"eff0e7b0-2e95-43d6-8d7c-f58c58432732\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd"
Nov 24 15:15:00 crc kubenswrapper[5039]: I1124 15:15:00.507792 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd"
Nov 24 15:15:00 crc kubenswrapper[5039]: I1124 15:15:00.966348 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd"]
Nov 24 15:15:01 crc kubenswrapper[5039]: I1124 15:15:01.754780 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd" event={"ID":"eff0e7b0-2e95-43d6-8d7c-f58c58432732","Type":"ContainerStarted","Data":"6268550f4014873609c2e5e81f3a192c6fbdf027bfe11f01d04c8f6b86317b28"}
Nov 24 15:15:01 crc kubenswrapper[5039]: I1124 15:15:01.755924 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd" event={"ID":"eff0e7b0-2e95-43d6-8d7c-f58c58432732","Type":"ContainerStarted","Data":"6059256985dc47816e4163b60ad08236562ada64848c6fa417eb3d70e63c8771"}
Nov 24 15:15:01 crc kubenswrapper[5039]: I1124 15:15:01.775950 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd" podStartSLOduration=1.7759353519999999 podStartE2EDuration="1.775935352s" podCreationTimestamp="2025-11-24 15:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 15:15:01.771988966 +0000 UTC m=+7014.211113466" watchObservedRunningTime="2025-11-24 15:15:01.775935352 +0000 UTC m=+7014.215059852"
Nov 24 15:15:02 crc kubenswrapper[5039]: I1124 15:15:02.775904 5039 generic.go:334] "Generic (PLEG): container finished" podID="eff0e7b0-2e95-43d6-8d7c-f58c58432732" containerID="6268550f4014873609c2e5e81f3a192c6fbdf027bfe11f01d04c8f6b86317b28" exitCode=0
Nov 24 15:15:02 crc kubenswrapper[5039]: I1124 15:15:02.775903 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd" event={"ID":"eff0e7b0-2e95-43d6-8d7c-f58c58432732","Type":"ContainerDied","Data":"6268550f4014873609c2e5e81f3a192c6fbdf027bfe11f01d04c8f6b86317b28"}
Nov 24 15:15:04 crc kubenswrapper[5039]: I1124 15:15:04.238485 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd"
Nov 24 15:15:04 crc kubenswrapper[5039]: I1124 15:15:04.274646 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tlgn9\" (UniqueName: \"kubernetes.io/projected/eff0e7b0-2e95-43d6-8d7c-f58c58432732-kube-api-access-tlgn9\") pod \"eff0e7b0-2e95-43d6-8d7c-f58c58432732\" (UID: \"eff0e7b0-2e95-43d6-8d7c-f58c58432732\") "
Nov 24 15:15:04 crc kubenswrapper[5039]: I1124 15:15:04.274798 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/eff0e7b0-2e95-43d6-8d7c-f58c58432732-secret-volume\") pod \"eff0e7b0-2e95-43d6-8d7c-f58c58432732\" (UID: \"eff0e7b0-2e95-43d6-8d7c-f58c58432732\") "
Nov 24 15:15:04 crc kubenswrapper[5039]: I1124 15:15:04.274864 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eff0e7b0-2e95-43d6-8d7c-f58c58432732-config-volume\") pod \"eff0e7b0-2e95-43d6-8d7c-f58c58432732\" (UID: \"eff0e7b0-2e95-43d6-8d7c-f58c58432732\") "
Nov 24 15:15:04 crc kubenswrapper[5039]: I1124 15:15:04.275844 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eff0e7b0-2e95-43d6-8d7c-f58c58432732-config-volume" (OuterVolumeSpecName: "config-volume") pod "eff0e7b0-2e95-43d6-8d7c-f58c58432732" (UID: "eff0e7b0-2e95-43d6-8d7c-f58c58432732"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 15:15:04 crc kubenswrapper[5039]: I1124 15:15:04.281018 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eff0e7b0-2e95-43d6-8d7c-f58c58432732-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "eff0e7b0-2e95-43d6-8d7c-f58c58432732" (UID: "eff0e7b0-2e95-43d6-8d7c-f58c58432732"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 15:15:04 crc kubenswrapper[5039]: I1124 15:15:04.281087 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eff0e7b0-2e95-43d6-8d7c-f58c58432732-kube-api-access-tlgn9" (OuterVolumeSpecName: "kube-api-access-tlgn9") pod "eff0e7b0-2e95-43d6-8d7c-f58c58432732" (UID: "eff0e7b0-2e95-43d6-8d7c-f58c58432732"). InnerVolumeSpecName "kube-api-access-tlgn9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 15:15:04 crc kubenswrapper[5039]: I1124 15:15:04.377939 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tlgn9\" (UniqueName: \"kubernetes.io/projected/eff0e7b0-2e95-43d6-8d7c-f58c58432732-kube-api-access-tlgn9\") on node \"crc\" DevicePath \"\""
Nov 24 15:15:04 crc kubenswrapper[5039]: I1124 15:15:04.378003 5039 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/eff0e7b0-2e95-43d6-8d7c-f58c58432732-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 24 15:15:04 crc kubenswrapper[5039]: I1124 15:15:04.378025 5039 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eff0e7b0-2e95-43d6-8d7c-f58c58432732-config-volume\") on node \"crc\" DevicePath \"\""
Nov 24 15:15:04 crc kubenswrapper[5039]: I1124 15:15:04.801032 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd" event={"ID":"eff0e7b0-2e95-43d6-8d7c-f58c58432732","Type":"ContainerDied","Data":"6059256985dc47816e4163b60ad08236562ada64848c6fa417eb3d70e63c8771"}
Nov 24 15:15:04 crc kubenswrapper[5039]: I1124 15:15:04.801348 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6059256985dc47816e4163b60ad08236562ada64848c6fa417eb3d70e63c8771"
Nov 24 15:15:04 crc kubenswrapper[5039]: I1124 15:15:04.801080 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399955-mlgkd"
Nov 24 15:15:04 crc kubenswrapper[5039]: I1124 15:15:04.859938 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg"]
Nov 24 15:15:04 crc kubenswrapper[5039]: I1124 15:15:04.873800 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399910-jzhfg"]
Nov 24 15:15:06 crc kubenswrapper[5039]: I1124 15:15:06.454854 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b38e1df-2624-4bb8-8656-51b4f9be00a0" path="/var/lib/kubelet/pods/5b38e1df-2624-4bb8-8656-51b4f9be00a0/volumes"
Nov 24 15:15:20 crc kubenswrapper[5039]: I1124 15:15:20.101446 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 15:15:20 crc kubenswrapper[5039]: I1124 15:15:20.102282 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 15:15:20 crc kubenswrapper[5039]: I1124 15:15:20.102606 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg"
Nov 24 15:15:20 crc kubenswrapper[5039]: I1124 15:15:20.104701 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 24 15:15:20 crc kubenswrapper[5039]: I1124 15:15:20.104823 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3" gracePeriod=600
Nov 24 15:15:20 crc kubenswrapper[5039]: E1124 15:15:20.238429 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 15:15:21 crc kubenswrapper[5039]: I1124 15:15:21.014031 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3" exitCode=0
Nov 24 15:15:21 crc kubenswrapper[5039]: I1124 15:15:21.014103 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3"}
Nov 24 15:15:21 crc kubenswrapper[5039]: I1124 15:15:21.014187 5039 scope.go:117] "RemoveContainer" containerID="a211a9c937d2afbfe7c6573e7ca039a9699e3651b4e18a07e35b912a8d12c59e"
Nov 24 15:15:21 crc kubenswrapper[5039]: I1124 15:15:21.014986 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3"
Nov 24 15:15:21 crc kubenswrapper[5039]: E1124 15:15:21.015382 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 15:15:23 crc kubenswrapper[5039]: I1124 15:15:23.203046 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-76clq/must-gather-qq2vm"]
Nov 24 15:15:23 crc kubenswrapper[5039]: E1124 15:15:23.204007 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eff0e7b0-2e95-43d6-8d7c-f58c58432732" containerName="collect-profiles"
Nov 24 15:15:23 crc kubenswrapper[5039]: I1124 15:15:23.204020 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="eff0e7b0-2e95-43d6-8d7c-f58c58432732" containerName="collect-profiles"
Nov 24 15:15:23 crc kubenswrapper[5039]: I1124 15:15:23.204260 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="eff0e7b0-2e95-43d6-8d7c-f58c58432732" containerName="collect-profiles"
Nov 24 15:15:23 crc kubenswrapper[5039]: I1124 15:15:23.207180 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-76clq/must-gather-qq2vm"
Nov 24 15:15:23 crc kubenswrapper[5039]: I1124 15:15:23.210352 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-76clq"/"kube-root-ca.crt"
Nov 24 15:15:23 crc kubenswrapper[5039]: I1124 15:15:23.210708 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-76clq"/"openshift-service-ca.crt"
Nov 24 15:15:23 crc kubenswrapper[5039]: I1124 15:15:23.238152 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-76clq/must-gather-qq2vm"]
Nov 24 15:15:23 crc kubenswrapper[5039]: I1124 15:15:23.289903 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k24l7\" (UniqueName: \"kubernetes.io/projected/32832beb-344a-48ae-a105-25a29b2d4b1d-kube-api-access-k24l7\") pod \"must-gather-qq2vm\" (UID: \"32832beb-344a-48ae-a105-25a29b2d4b1d\") " pod="openshift-must-gather-76clq/must-gather-qq2vm"
Nov 24 15:15:23 crc kubenswrapper[5039]: I1124 15:15:23.290408 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/32832beb-344a-48ae-a105-25a29b2d4b1d-must-gather-output\") pod \"must-gather-qq2vm\" (UID: \"32832beb-344a-48ae-a105-25a29b2d4b1d\") " pod="openshift-must-gather-76clq/must-gather-qq2vm"
Nov 24 15:15:23 crc kubenswrapper[5039]: I1124 15:15:23.392219 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/32832beb-344a-48ae-a105-25a29b2d4b1d-must-gather-output\") pod \"must-gather-qq2vm\" (UID: \"32832beb-344a-48ae-a105-25a29b2d4b1d\") " pod="openshift-must-gather-76clq/must-gather-qq2vm"
Nov 24 15:15:23 crc kubenswrapper[5039]: I1124 15:15:23.392309 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k24l7\" (UniqueName: \"kubernetes.io/projected/32832beb-344a-48ae-a105-25a29b2d4b1d-kube-api-access-k24l7\") pod \"must-gather-qq2vm\" (UID: \"32832beb-344a-48ae-a105-25a29b2d4b1d\") " pod="openshift-must-gather-76clq/must-gather-qq2vm"
Nov 24 15:15:23 crc kubenswrapper[5039]: I1124 15:15:23.393255 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/32832beb-344a-48ae-a105-25a29b2d4b1d-must-gather-output\") pod \"must-gather-qq2vm\" (UID: \"32832beb-344a-48ae-a105-25a29b2d4b1d\") " pod="openshift-must-gather-76clq/must-gather-qq2vm"
Nov 24 15:15:23 crc kubenswrapper[5039]: I1124 15:15:23.414093 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k24l7\" (UniqueName: \"kubernetes.io/projected/32832beb-344a-48ae-a105-25a29b2d4b1d-kube-api-access-k24l7\") pod \"must-gather-qq2vm\" (UID: \"32832beb-344a-48ae-a105-25a29b2d4b1d\") " pod="openshift-must-gather-76clq/must-gather-qq2vm"
Nov 24 15:15:23 crc kubenswrapper[5039]: I1124 15:15:23.534688 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-76clq/must-gather-qq2vm"
Nov 24 15:15:24 crc kubenswrapper[5039]: I1124 15:15:24.039814 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-76clq/must-gather-qq2vm"]
Nov 24 15:15:24 crc kubenswrapper[5039]: I1124 15:15:24.052533 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-76clq/must-gather-qq2vm" event={"ID":"32832beb-344a-48ae-a105-25a29b2d4b1d","Type":"ContainerStarted","Data":"dc5db2fcc676dfe2e11876a970952a7e48ee1d6c1fbe6e56243a3d1a9ad7009c"}
Nov 24 15:15:29 crc kubenswrapper[5039]: I1124 15:15:29.110234 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-76clq/must-gather-qq2vm" event={"ID":"32832beb-344a-48ae-a105-25a29b2d4b1d","Type":"ContainerStarted","Data":"fc78bdeaec3ba62dd002284d818691b0aa8bf1b5c6a6d4c02154dcd2edcd6c78"}
Nov 24 15:15:29 crc kubenswrapper[5039]: I1124 15:15:29.110802 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-76clq/must-gather-qq2vm" event={"ID":"32832beb-344a-48ae-a105-25a29b2d4b1d","Type":"ContainerStarted","Data":"8e9a81e949051fabfc7cf5240ce64d395951d86a6d9c781bf8e2e059aa28549a"}
Nov 24 15:15:29 crc kubenswrapper[5039]: I1124 15:15:29.130292 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-76clq/must-gather-qq2vm" podStartSLOduration=1.978204922 podStartE2EDuration="6.130267883s" podCreationTimestamp="2025-11-24 15:15:23 +0000 UTC" firstStartedPulling="2025-11-24 15:15:24.041684597 +0000 UTC m=+7036.480809097" lastFinishedPulling="2025-11-24 15:15:28.193747568 +0000 UTC m=+7040.632872058" observedRunningTime="2025-11-24 15:15:29.125689902 +0000 UTC m=+7041.564814412" watchObservedRunningTime="2025-11-24 15:15:29.130267883 +0000 UTC m=+7041.569392393"
Nov 24 15:15:34 crc kubenswrapper[5039]: I1124 15:15:34.788850 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-76clq/crc-debug-8pgxz"]
Nov 24 15:15:34 crc kubenswrapper[5039]: I1124 15:15:34.790682 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-76clq/crc-debug-8pgxz"
Nov 24 15:15:34 crc kubenswrapper[5039]: I1124 15:15:34.798765 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-76clq"/"default-dockercfg-b4s79"
Nov 24 15:15:34 crc kubenswrapper[5039]: I1124 15:15:34.862050 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8ddc45d3-40e3-495f-9869-8c252dc61164-host\") pod \"crc-debug-8pgxz\" (UID: \"8ddc45d3-40e3-495f-9869-8c252dc61164\") " pod="openshift-must-gather-76clq/crc-debug-8pgxz"
Nov 24 15:15:34 crc kubenswrapper[5039]: I1124 15:15:34.862153 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmsjr\" (UniqueName: \"kubernetes.io/projected/8ddc45d3-40e3-495f-9869-8c252dc61164-kube-api-access-fmsjr\") pod \"crc-debug-8pgxz\" (UID: \"8ddc45d3-40e3-495f-9869-8c252dc61164\") " pod="openshift-must-gather-76clq/crc-debug-8pgxz"
Nov 24 15:15:34 crc kubenswrapper[5039]: I1124 15:15:34.964530 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8ddc45d3-40e3-495f-9869-8c252dc61164-host\") pod \"crc-debug-8pgxz\" (UID: \"8ddc45d3-40e3-495f-9869-8c252dc61164\") " pod="openshift-must-gather-76clq/crc-debug-8pgxz"
Nov 24 15:15:34 crc kubenswrapper[5039]: I1124 15:15:34.964643 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmsjr\" (UniqueName: \"kubernetes.io/projected/8ddc45d3-40e3-495f-9869-8c252dc61164-kube-api-access-fmsjr\") pod \"crc-debug-8pgxz\" (UID: \"8ddc45d3-40e3-495f-9869-8c252dc61164\") " pod="openshift-must-gather-76clq/crc-debug-8pgxz"
Nov 24 15:15:34 crc kubenswrapper[5039]: I1124 15:15:34.964660 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8ddc45d3-40e3-495f-9869-8c252dc61164-host\") pod \"crc-debug-8pgxz\" (UID: \"8ddc45d3-40e3-495f-9869-8c252dc61164\") " pod="openshift-must-gather-76clq/crc-debug-8pgxz"
Nov 24 15:15:34 crc kubenswrapper[5039]: I1124 15:15:34.998623 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmsjr\" (UniqueName: \"kubernetes.io/projected/8ddc45d3-40e3-495f-9869-8c252dc61164-kube-api-access-fmsjr\") pod \"crc-debug-8pgxz\" (UID: \"8ddc45d3-40e3-495f-9869-8c252dc61164\") " pod="openshift-must-gather-76clq/crc-debug-8pgxz"
Nov 24 15:15:35 crc kubenswrapper[5039]: I1124 15:15:35.121661 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-76clq/crc-debug-8pgxz"
Nov 24 15:15:35 crc kubenswrapper[5039]: I1124 15:15:35.306926 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3"
Nov 24 15:15:35 crc kubenswrapper[5039]: E1124 15:15:35.307198 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 15:15:36 crc kubenswrapper[5039]: I1124 15:15:36.202698 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-76clq/crc-debug-8pgxz" event={"ID":"8ddc45d3-40e3-495f-9869-8c252dc61164","Type":"ContainerStarted","Data":"d7768527b91b0ab4e977409e96f64b49e49e356c6b1baa481f717dc0f804318a"}
Nov 24 15:15:36 crc kubenswrapper[5039]: E1124 15:15:36.246545 5039 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.175:34760->38.102.83.175:41425: write tcp 38.102.83.175:34760->38.102.83.175:41425: write: broken pipe
Nov 24 15:15:48 crc kubenswrapper[5039]: I1124 15:15:48.350331 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-76clq/crc-debug-8pgxz" event={"ID":"8ddc45d3-40e3-495f-9869-8c252dc61164","Type":"ContainerStarted","Data":"b06ca7bfd51ae89a7fc61d0297c878da9e18b9b1359e0d2016f731077d15c981"}
Nov 24 15:15:48 crc kubenswrapper[5039]: I1124 15:15:48.377428 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-76clq/crc-debug-8pgxz" podStartSLOduration=2.176797445 podStartE2EDuration="14.377404524s" podCreationTimestamp="2025-11-24 15:15:34 +0000 UTC" firstStartedPulling="2025-11-24 15:15:35.169672074 +0000 UTC m=+7047.608796574" lastFinishedPulling="2025-11-24 15:15:47.370279133 +0000 UTC m=+7059.809403653" observedRunningTime="2025-11-24 15:15:48.366150568 +0000 UTC m=+7060.805275078" watchObservedRunningTime="2025-11-24 15:15:48.377404524 +0000 UTC m=+7060.816529034"
Nov 24 15:15:48 crc kubenswrapper[5039]: I1124 15:15:48.628574 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zl8wf"]
Nov 24 15:15:48 crc kubenswrapper[5039]: I1124 15:15:48.630944 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zl8wf"
Nov 24 15:15:48 crc kubenswrapper[5039]: I1124 15:15:48.645564 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zl8wf"]
Nov 24 15:15:48 crc kubenswrapper[5039]: I1124 15:15:48.676616 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39182268-3149-4003-bc5d-2f065fa1e904-utilities\") pod \"redhat-operators-zl8wf\" (UID: \"39182268-3149-4003-bc5d-2f065fa1e904\") " pod="openshift-marketplace/redhat-operators-zl8wf"
Nov 24 15:15:48 crc kubenswrapper[5039]: I1124 15:15:48.676740 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2qg6\" (UniqueName: \"kubernetes.io/projected/39182268-3149-4003-bc5d-2f065fa1e904-kube-api-access-v2qg6\") pod \"redhat-operators-zl8wf\" (UID: \"39182268-3149-4003-bc5d-2f065fa1e904\") " pod="openshift-marketplace/redhat-operators-zl8wf"
Nov 24 15:15:48 crc kubenswrapper[5039]: I1124 15:15:48.677163 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39182268-3149-4003-bc5d-2f065fa1e904-catalog-content\") pod \"redhat-operators-zl8wf\" (UID: \"39182268-3149-4003-bc5d-2f065fa1e904\") " pod="openshift-marketplace/redhat-operators-zl8wf"
Nov 24 15:15:48 crc kubenswrapper[5039]: I1124 15:15:48.779751 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2qg6\" (UniqueName: \"kubernetes.io/projected/39182268-3149-4003-bc5d-2f065fa1e904-kube-api-access-v2qg6\") pod \"redhat-operators-zl8wf\" (UID: \"39182268-3149-4003-bc5d-2f065fa1e904\") " pod="openshift-marketplace/redhat-operators-zl8wf"
Nov 24 15:15:48 crc kubenswrapper[5039]: I1124 15:15:48.779951 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39182268-3149-4003-bc5d-2f065fa1e904-catalog-content\") pod \"redhat-operators-zl8wf\" (UID: \"39182268-3149-4003-bc5d-2f065fa1e904\") " pod="openshift-marketplace/redhat-operators-zl8wf"
Nov 24 15:15:48 crc kubenswrapper[5039]: I1124 15:15:48.780071 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39182268-3149-4003-bc5d-2f065fa1e904-utilities\") pod \"redhat-operators-zl8wf\" (UID: \"39182268-3149-4003-bc5d-2f065fa1e904\") " pod="openshift-marketplace/redhat-operators-zl8wf"
Nov 24 15:15:48 crc kubenswrapper[5039]: I1124 15:15:48.780555 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39182268-3149-4003-bc5d-2f065fa1e904-catalog-content\") pod \"redhat-operators-zl8wf\" (UID: \"39182268-3149-4003-bc5d-2f065fa1e904\") " pod="openshift-marketplace/redhat-operators-zl8wf"
Nov 24 15:15:48 crc kubenswrapper[5039]: I1124 15:15:48.780623 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39182268-3149-4003-bc5d-2f065fa1e904-utilities\") pod \"redhat-operators-zl8wf\" (UID: \"39182268-3149-4003-bc5d-2f065fa1e904\") " pod="openshift-marketplace/redhat-operators-zl8wf"
Nov 24 15:15:48 crc kubenswrapper[5039]: I1124 15:15:48.801223 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2qg6\" (UniqueName: \"kubernetes.io/projected/39182268-3149-4003-bc5d-2f065fa1e904-kube-api-access-v2qg6\") pod \"redhat-operators-zl8wf\" (UID: \"39182268-3149-4003-bc5d-2f065fa1e904\") " pod="openshift-marketplace/redhat-operators-zl8wf"
Nov 24 15:15:48 crc kubenswrapper[5039]: I1124 15:15:48.954929 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zl8wf"
Nov 24 15:15:49 crc kubenswrapper[5039]: I1124 15:15:49.306675 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3"
Nov 24 15:15:49 crc kubenswrapper[5039]: E1124 15:15:49.307272 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 15:15:49 crc kubenswrapper[5039]: I1124 15:15:49.522715 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zl8wf"]
Nov 24 15:15:50 crc kubenswrapper[5039]: I1124 15:15:50.380745 5039 generic.go:334] "Generic (PLEG): container finished" podID="39182268-3149-4003-bc5d-2f065fa1e904" containerID="2d712533810f18dd31030a40fb6a67513fca9c0a480c991539a6f69c8c5c0572" exitCode=0
Nov 24 15:15:50 crc kubenswrapper[5039]: I1124 15:15:50.380790 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zl8wf" event={"ID":"39182268-3149-4003-bc5d-2f065fa1e904","Type":"ContainerDied","Data":"2d712533810f18dd31030a40fb6a67513fca9c0a480c991539a6f69c8c5c0572"}
Nov 24 15:15:50 crc kubenswrapper[5039]: I1124 15:15:50.381286 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zl8wf" event={"ID":"39182268-3149-4003-bc5d-2f065fa1e904","Type":"ContainerStarted","Data":"090bab9becf24bcdec99aed660893c8e0d425eee60136e48fd4f547ec4b2efb3"}
Nov 24 15:15:52 crc kubenswrapper[5039]: I1124 15:15:52.400874 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zl8wf" event={"ID":"39182268-3149-4003-bc5d-2f065fa1e904","Type":"ContainerStarted","Data":"f2bb9ec45f28d35fb8447c220033f55b9a890368437d4c2be649d90c221ea67d"}
Nov 24 15:15:56 crc kubenswrapper[5039]: E1124 15:15:56.138610 5039 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod39182268_3149_4003_bc5d_2f065fa1e904.slice/crio-conmon-f2bb9ec45f28d35fb8447c220033f55b9a890368437d4c2be649d90c221ea67d.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod39182268_3149_4003_bc5d_2f065fa1e904.slice/crio-f2bb9ec45f28d35fb8447c220033f55b9a890368437d4c2be649d90c221ea67d.scope\": RecentStats: unable to find data in memory cache]"
Nov 24 15:15:56 crc kubenswrapper[5039]: I1124 15:15:56.446051 5039 generic.go:334] "Generic (PLEG): container finished" podID="39182268-3149-4003-bc5d-2f065fa1e904" containerID="f2bb9ec45f28d35fb8447c220033f55b9a890368437d4c2be649d90c221ea67d" exitCode=0
Nov 24 15:15:56 crc kubenswrapper[5039]: I1124 15:15:56.446095 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod"
pod="openshift-marketplace/redhat-operators-zl8wf" event={"ID":"39182268-3149-4003-bc5d-2f065fa1e904","Type":"ContainerDied","Data":"f2bb9ec45f28d35fb8447c220033f55b9a890368437d4c2be649d90c221ea67d"} Nov 24 15:15:57 crc kubenswrapper[5039]: I1124 15:15:57.066155 5039 scope.go:117] "RemoveContainer" containerID="9456d55cdb9d8455b56028155a6448096807eeb277d65cfe469cc936bff2c229" Nov 24 15:16:01 crc kubenswrapper[5039]: I1124 15:16:01.307190 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3" Nov 24 15:16:01 crc kubenswrapper[5039]: E1124 15:16:01.307849 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:16:01 crc kubenswrapper[5039]: I1124 15:16:01.508542 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zl8wf" event={"ID":"39182268-3149-4003-bc5d-2f065fa1e904","Type":"ContainerStarted","Data":"97ef60771286d91038b504eda7658d42a320d0ef3b0f633f96ef7f44b4581597"} Nov 24 15:16:01 crc kubenswrapper[5039]: I1124 15:16:01.537368 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zl8wf" podStartSLOduration=2.888131868 podStartE2EDuration="13.537349646s" podCreationTimestamp="2025-11-24 15:15:48 +0000 UTC" firstStartedPulling="2025-11-24 15:15:50.38285246 +0000 UTC m=+7062.821976960" lastFinishedPulling="2025-11-24 15:16:01.032070238 +0000 UTC m=+7073.471194738" observedRunningTime="2025-11-24 15:16:01.527200507 +0000 UTC m=+7073.966325007" watchObservedRunningTime="2025-11-24 15:16:01.537349646 +0000 UTC m=+7073.976474146" Nov 24 15:16:08 crc kubenswrapper[5039]: I1124 15:16:08.955786 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zl8wf" Nov 24 15:16:08 crc kubenswrapper[5039]: I1124 15:16:08.956316 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zl8wf" Nov 24 15:16:10 crc kubenswrapper[5039]: I1124 15:16:10.013580 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zl8wf" podUID="39182268-3149-4003-bc5d-2f065fa1e904" containerName="registry-server" probeResult="failure" output=< Nov 24 15:16:10 crc kubenswrapper[5039]: timeout: failed to connect service ":50051" within 1s Nov 24 15:16:10 crc kubenswrapper[5039]: > Nov 24 15:16:16 crc kubenswrapper[5039]: I1124 15:16:16.311531 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3" Nov 24 15:16:16 crc kubenswrapper[5039]: E1124 15:16:16.312293 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:16:20 crc kubenswrapper[5039]: I1124 15:16:20.010646 5039 prober.go:107] "Probe 
failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zl8wf" podUID="39182268-3149-4003-bc5d-2f065fa1e904" containerName="registry-server" probeResult="failure" output=< Nov 24 15:16:20 crc kubenswrapper[5039]: timeout: failed to connect service ":50051" within 1s Nov 24 15:16:20 crc kubenswrapper[5039]: > Nov 24 15:16:28 crc kubenswrapper[5039]: I1124 15:16:28.321233 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3" Nov 24 15:16:28 crc kubenswrapper[5039]: E1124 15:16:28.322298 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:16:30 crc kubenswrapper[5039]: I1124 15:16:30.029024 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zl8wf" podUID="39182268-3149-4003-bc5d-2f065fa1e904" containerName="registry-server" probeResult="failure" output=< Nov 24 15:16:30 crc kubenswrapper[5039]: timeout: failed to connect service ":50051" within 1s Nov 24 15:16:30 crc kubenswrapper[5039]: > Nov 24 15:16:39 crc kubenswrapper[5039]: I1124 15:16:39.012464 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zl8wf" Nov 24 15:16:39 crc kubenswrapper[5039]: I1124 15:16:39.063459 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zl8wf" Nov 24 15:16:39 crc kubenswrapper[5039]: I1124 15:16:39.247985 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zl8wf"] Nov 24 15:16:39 crc kubenswrapper[5039]: I1124 15:16:39.944315 5039 generic.go:334] "Generic (PLEG): container finished" podID="8ddc45d3-40e3-495f-9869-8c252dc61164" containerID="b06ca7bfd51ae89a7fc61d0297c878da9e18b9b1359e0d2016f731077d15c981" exitCode=0 Nov 24 15:16:39 crc kubenswrapper[5039]: I1124 15:16:39.944413 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-76clq/crc-debug-8pgxz" event={"ID":"8ddc45d3-40e3-495f-9869-8c252dc61164","Type":"ContainerDied","Data":"b06ca7bfd51ae89a7fc61d0297c878da9e18b9b1359e0d2016f731077d15c981"} Nov 24 15:16:40 crc kubenswrapper[5039]: I1124 15:16:40.307455 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3" Nov 24 15:16:40 crc kubenswrapper[5039]: E1124 15:16:40.307800 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:16:40 crc kubenswrapper[5039]: I1124 15:16:40.953392 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zl8wf" podUID="39182268-3149-4003-bc5d-2f065fa1e904" containerName="registry-server" 
containerID="cri-o://97ef60771286d91038b504eda7658d42a320d0ef3b0f633f96ef7f44b4581597" gracePeriod=2 Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.198272 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-76clq/crc-debug-8pgxz" Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.250606 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-76clq/crc-debug-8pgxz"] Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.255186 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-76clq/crc-debug-8pgxz"] Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.342741 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8ddc45d3-40e3-495f-9869-8c252dc61164-host\") pod \"8ddc45d3-40e3-495f-9869-8c252dc61164\" (UID: \"8ddc45d3-40e3-495f-9869-8c252dc61164\") " Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.342963 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fmsjr\" (UniqueName: \"kubernetes.io/projected/8ddc45d3-40e3-495f-9869-8c252dc61164-kube-api-access-fmsjr\") pod \"8ddc45d3-40e3-495f-9869-8c252dc61164\" (UID: \"8ddc45d3-40e3-495f-9869-8c252dc61164\") " Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.344621 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8ddc45d3-40e3-495f-9869-8c252dc61164-host" (OuterVolumeSpecName: "host") pod "8ddc45d3-40e3-495f-9869-8c252dc61164" (UID: "8ddc45d3-40e3-495f-9869-8c252dc61164"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.354450 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ddc45d3-40e3-495f-9869-8c252dc61164-kube-api-access-fmsjr" (OuterVolumeSpecName: "kube-api-access-fmsjr") pod "8ddc45d3-40e3-495f-9869-8c252dc61164" (UID: "8ddc45d3-40e3-495f-9869-8c252dc61164"). InnerVolumeSpecName "kube-api-access-fmsjr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.446263 5039 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8ddc45d3-40e3-495f-9869-8c252dc61164-host\") on node \"crc\" DevicePath \"\"" Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.447232 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fmsjr\" (UniqueName: \"kubernetes.io/projected/8ddc45d3-40e3-495f-9869-8c252dc61164-kube-api-access-fmsjr\") on node \"crc\" DevicePath \"\"" Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.459247 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-zl8wf" Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.548205 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39182268-3149-4003-bc5d-2f065fa1e904-utilities\") pod \"39182268-3149-4003-bc5d-2f065fa1e904\" (UID: \"39182268-3149-4003-bc5d-2f065fa1e904\") " Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.548426 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2qg6\" (UniqueName: \"kubernetes.io/projected/39182268-3149-4003-bc5d-2f065fa1e904-kube-api-access-v2qg6\") pod \"39182268-3149-4003-bc5d-2f065fa1e904\" (UID: \"39182268-3149-4003-bc5d-2f065fa1e904\") " Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.548605 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39182268-3149-4003-bc5d-2f065fa1e904-catalog-content\") pod \"39182268-3149-4003-bc5d-2f065fa1e904\" (UID: \"39182268-3149-4003-bc5d-2f065fa1e904\") " Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.549114 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39182268-3149-4003-bc5d-2f065fa1e904-utilities" (OuterVolumeSpecName: "utilities") pod "39182268-3149-4003-bc5d-2f065fa1e904" (UID: "39182268-3149-4003-bc5d-2f065fa1e904"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.552298 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39182268-3149-4003-bc5d-2f065fa1e904-kube-api-access-v2qg6" (OuterVolumeSpecName: "kube-api-access-v2qg6") pod "39182268-3149-4003-bc5d-2f065fa1e904" (UID: "39182268-3149-4003-bc5d-2f065fa1e904"). InnerVolumeSpecName "kube-api-access-v2qg6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.631530 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39182268-3149-4003-bc5d-2f065fa1e904-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "39182268-3149-4003-bc5d-2f065fa1e904" (UID: "39182268-3149-4003-bc5d-2f065fa1e904"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.651283 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v2qg6\" (UniqueName: \"kubernetes.io/projected/39182268-3149-4003-bc5d-2f065fa1e904-kube-api-access-v2qg6\") on node \"crc\" DevicePath \"\"" Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.651325 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39182268-3149-4003-bc5d-2f065fa1e904-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.651336 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39182268-3149-4003-bc5d-2f065fa1e904-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.971980 5039 generic.go:334] "Generic (PLEG): container finished" podID="39182268-3149-4003-bc5d-2f065fa1e904" containerID="97ef60771286d91038b504eda7658d42a320d0ef3b0f633f96ef7f44b4581597" exitCode=0 Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.972064 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zl8wf" event={"ID":"39182268-3149-4003-bc5d-2f065fa1e904","Type":"ContainerDied","Data":"97ef60771286d91038b504eda7658d42a320d0ef3b0f633f96ef7f44b4581597"} Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.972098 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zl8wf" Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.973020 5039 scope.go:117] "RemoveContainer" containerID="97ef60771286d91038b504eda7658d42a320d0ef3b0f633f96ef7f44b4581597" Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.972896 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zl8wf" event={"ID":"39182268-3149-4003-bc5d-2f065fa1e904","Type":"ContainerDied","Data":"090bab9becf24bcdec99aed660893c8e0d425eee60136e48fd4f547ec4b2efb3"} Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.978115 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d7768527b91b0ab4e977409e96f64b49e49e356c6b1baa481f717dc0f804318a" Nov 24 15:16:41 crc kubenswrapper[5039]: I1124 15:16:41.978163 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-76clq/crc-debug-8pgxz" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.071024 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zl8wf"] Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.076194 5039 scope.go:117] "RemoveContainer" containerID="f2bb9ec45f28d35fb8447c220033f55b9a890368437d4c2be649d90c221ea67d" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.080444 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zl8wf"] Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.094965 5039 scope.go:117] "RemoveContainer" containerID="2d712533810f18dd31030a40fb6a67513fca9c0a480c991539a6f69c8c5c0572" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.158245 5039 scope.go:117] "RemoveContainer" containerID="97ef60771286d91038b504eda7658d42a320d0ef3b0f633f96ef7f44b4581597" Nov 24 15:16:42 crc kubenswrapper[5039]: E1124 15:16:42.158739 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97ef60771286d91038b504eda7658d42a320d0ef3b0f633f96ef7f44b4581597\": container with ID starting with 97ef60771286d91038b504eda7658d42a320d0ef3b0f633f96ef7f44b4581597 not found: ID does not exist" containerID="97ef60771286d91038b504eda7658d42a320d0ef3b0f633f96ef7f44b4581597" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.158775 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97ef60771286d91038b504eda7658d42a320d0ef3b0f633f96ef7f44b4581597"} err="failed to get container status \"97ef60771286d91038b504eda7658d42a320d0ef3b0f633f96ef7f44b4581597\": rpc error: code = NotFound desc = could not find container \"97ef60771286d91038b504eda7658d42a320d0ef3b0f633f96ef7f44b4581597\": container with ID starting with 97ef60771286d91038b504eda7658d42a320d0ef3b0f633f96ef7f44b4581597 not found: ID does not exist" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.158799 5039 scope.go:117] "RemoveContainer" containerID="f2bb9ec45f28d35fb8447c220033f55b9a890368437d4c2be649d90c221ea67d" Nov 24 15:16:42 crc kubenswrapper[5039]: E1124 15:16:42.159056 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2bb9ec45f28d35fb8447c220033f55b9a890368437d4c2be649d90c221ea67d\": container with ID starting with f2bb9ec45f28d35fb8447c220033f55b9a890368437d4c2be649d90c221ea67d not found: ID does not exist" containerID="f2bb9ec45f28d35fb8447c220033f55b9a890368437d4c2be649d90c221ea67d" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.159088 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2bb9ec45f28d35fb8447c220033f55b9a890368437d4c2be649d90c221ea67d"} err="failed to get container status \"f2bb9ec45f28d35fb8447c220033f55b9a890368437d4c2be649d90c221ea67d\": rpc error: code = NotFound desc = could not find container \"f2bb9ec45f28d35fb8447c220033f55b9a890368437d4c2be649d90c221ea67d\": container with ID starting with f2bb9ec45f28d35fb8447c220033f55b9a890368437d4c2be649d90c221ea67d not found: ID does not exist" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.159107 5039 scope.go:117] "RemoveContainer" containerID="2d712533810f18dd31030a40fb6a67513fca9c0a480c991539a6f69c8c5c0572" Nov 24 15:16:42 crc kubenswrapper[5039]: E1124 15:16:42.159693 5039 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"2d712533810f18dd31030a40fb6a67513fca9c0a480c991539a6f69c8c5c0572\": container with ID starting with 2d712533810f18dd31030a40fb6a67513fca9c0a480c991539a6f69c8c5c0572 not found: ID does not exist" containerID="2d712533810f18dd31030a40fb6a67513fca9c0a480c991539a6f69c8c5c0572" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.159727 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d712533810f18dd31030a40fb6a67513fca9c0a480c991539a6f69c8c5c0572"} err="failed to get container status \"2d712533810f18dd31030a40fb6a67513fca9c0a480c991539a6f69c8c5c0572\": rpc error: code = NotFound desc = could not find container \"2d712533810f18dd31030a40fb6a67513fca9c0a480c991539a6f69c8c5c0572\": container with ID starting with 2d712533810f18dd31030a40fb6a67513fca9c0a480c991539a6f69c8c5c0572 not found: ID does not exist" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.320255 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39182268-3149-4003-bc5d-2f065fa1e904" path="/var/lib/kubelet/pods/39182268-3149-4003-bc5d-2f065fa1e904/volumes" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.322218 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ddc45d3-40e3-495f-9869-8c252dc61164" path="/var/lib/kubelet/pods/8ddc45d3-40e3-495f-9869-8c252dc61164/volumes" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.420842 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-76clq/crc-debug-sxwpr"] Nov 24 15:16:42 crc kubenswrapper[5039]: E1124 15:16:42.421269 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39182268-3149-4003-bc5d-2f065fa1e904" containerName="registry-server" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.421283 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="39182268-3149-4003-bc5d-2f065fa1e904" containerName="registry-server" Nov 24 15:16:42 crc kubenswrapper[5039]: E1124 15:16:42.421298 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ddc45d3-40e3-495f-9869-8c252dc61164" containerName="container-00" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.421304 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ddc45d3-40e3-495f-9869-8c252dc61164" containerName="container-00" Nov 24 15:16:42 crc kubenswrapper[5039]: E1124 15:16:42.421322 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39182268-3149-4003-bc5d-2f065fa1e904" containerName="extract-utilities" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.421328 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="39182268-3149-4003-bc5d-2f065fa1e904" containerName="extract-utilities" Nov 24 15:16:42 crc kubenswrapper[5039]: E1124 15:16:42.421367 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39182268-3149-4003-bc5d-2f065fa1e904" containerName="extract-content" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.421373 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="39182268-3149-4003-bc5d-2f065fa1e904" containerName="extract-content" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.421598 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="39182268-3149-4003-bc5d-2f065fa1e904" containerName="registry-server" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.421621 5039 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="8ddc45d3-40e3-495f-9869-8c252dc61164" containerName="container-00" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.422336 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-76clq/crc-debug-sxwpr" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.423786 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-76clq"/"default-dockercfg-b4s79" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.575076 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/24bcdc35-3b2e-4ba2-8aa6-35af84f36118-host\") pod \"crc-debug-sxwpr\" (UID: \"24bcdc35-3b2e-4ba2-8aa6-35af84f36118\") " pod="openshift-must-gather-76clq/crc-debug-sxwpr" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.575228 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4hmks\" (UniqueName: \"kubernetes.io/projected/24bcdc35-3b2e-4ba2-8aa6-35af84f36118-kube-api-access-4hmks\") pod \"crc-debug-sxwpr\" (UID: \"24bcdc35-3b2e-4ba2-8aa6-35af84f36118\") " pod="openshift-must-gather-76clq/crc-debug-sxwpr" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.676824 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4hmks\" (UniqueName: \"kubernetes.io/projected/24bcdc35-3b2e-4ba2-8aa6-35af84f36118-kube-api-access-4hmks\") pod \"crc-debug-sxwpr\" (UID: \"24bcdc35-3b2e-4ba2-8aa6-35af84f36118\") " pod="openshift-must-gather-76clq/crc-debug-sxwpr" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.677004 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/24bcdc35-3b2e-4ba2-8aa6-35af84f36118-host\") pod \"crc-debug-sxwpr\" (UID: \"24bcdc35-3b2e-4ba2-8aa6-35af84f36118\") " pod="openshift-must-gather-76clq/crc-debug-sxwpr" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.677116 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/24bcdc35-3b2e-4ba2-8aa6-35af84f36118-host\") pod \"crc-debug-sxwpr\" (UID: \"24bcdc35-3b2e-4ba2-8aa6-35af84f36118\") " pod="openshift-must-gather-76clq/crc-debug-sxwpr" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.698143 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4hmks\" (UniqueName: \"kubernetes.io/projected/24bcdc35-3b2e-4ba2-8aa6-35af84f36118-kube-api-access-4hmks\") pod \"crc-debug-sxwpr\" (UID: \"24bcdc35-3b2e-4ba2-8aa6-35af84f36118\") " pod="openshift-must-gather-76clq/crc-debug-sxwpr" Nov 24 15:16:42 crc kubenswrapper[5039]: I1124 15:16:42.741710 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-76clq/crc-debug-sxwpr" Nov 24 15:16:43 crc kubenswrapper[5039]: I1124 15:16:43.008206 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-76clq/crc-debug-sxwpr" event={"ID":"24bcdc35-3b2e-4ba2-8aa6-35af84f36118","Type":"ContainerStarted","Data":"732bfa03efd7e3cd9c879af00d0bacb9702068f5f399f22beb35030af89d03a4"} Nov 24 15:16:44 crc kubenswrapper[5039]: I1124 15:16:44.027337 5039 generic.go:334] "Generic (PLEG): container finished" podID="24bcdc35-3b2e-4ba2-8aa6-35af84f36118" containerID="eee1d977deb0f22f4ced149bc1b038ef2001e44d86b2500817c2fa4d6330bd82" exitCode=0 Nov 24 15:16:44 crc kubenswrapper[5039]: I1124 15:16:44.027413 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-76clq/crc-debug-sxwpr" event={"ID":"24bcdc35-3b2e-4ba2-8aa6-35af84f36118","Type":"ContainerDied","Data":"eee1d977deb0f22f4ced149bc1b038ef2001e44d86b2500817c2fa4d6330bd82"} Nov 24 15:16:45 crc kubenswrapper[5039]: I1124 15:16:45.174160 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-76clq/crc-debug-sxwpr" Nov 24 15:16:45 crc kubenswrapper[5039]: I1124 15:16:45.248069 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4hmks\" (UniqueName: \"kubernetes.io/projected/24bcdc35-3b2e-4ba2-8aa6-35af84f36118-kube-api-access-4hmks\") pod \"24bcdc35-3b2e-4ba2-8aa6-35af84f36118\" (UID: \"24bcdc35-3b2e-4ba2-8aa6-35af84f36118\") " Nov 24 15:16:45 crc kubenswrapper[5039]: I1124 15:16:45.248260 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/24bcdc35-3b2e-4ba2-8aa6-35af84f36118-host\") pod \"24bcdc35-3b2e-4ba2-8aa6-35af84f36118\" (UID: \"24bcdc35-3b2e-4ba2-8aa6-35af84f36118\") " Nov 24 15:16:45 crc kubenswrapper[5039]: I1124 15:16:45.248409 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/24bcdc35-3b2e-4ba2-8aa6-35af84f36118-host" (OuterVolumeSpecName: "host") pod "24bcdc35-3b2e-4ba2-8aa6-35af84f36118" (UID: "24bcdc35-3b2e-4ba2-8aa6-35af84f36118"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 15:16:45 crc kubenswrapper[5039]: I1124 15:16:45.248851 5039 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/24bcdc35-3b2e-4ba2-8aa6-35af84f36118-host\") on node \"crc\" DevicePath \"\"" Nov 24 15:16:45 crc kubenswrapper[5039]: I1124 15:16:45.253119 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24bcdc35-3b2e-4ba2-8aa6-35af84f36118-kube-api-access-4hmks" (OuterVolumeSpecName: "kube-api-access-4hmks") pod "24bcdc35-3b2e-4ba2-8aa6-35af84f36118" (UID: "24bcdc35-3b2e-4ba2-8aa6-35af84f36118"). InnerVolumeSpecName "kube-api-access-4hmks". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 15:16:45 crc kubenswrapper[5039]: I1124 15:16:45.350750 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4hmks\" (UniqueName: \"kubernetes.io/projected/24bcdc35-3b2e-4ba2-8aa6-35af84f36118-kube-api-access-4hmks\") on node \"crc\" DevicePath \"\"" Nov 24 15:16:46 crc kubenswrapper[5039]: I1124 15:16:46.048392 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-76clq/crc-debug-sxwpr" event={"ID":"24bcdc35-3b2e-4ba2-8aa6-35af84f36118","Type":"ContainerDied","Data":"732bfa03efd7e3cd9c879af00d0bacb9702068f5f399f22beb35030af89d03a4"} Nov 24 15:16:46 crc kubenswrapper[5039]: I1124 15:16:46.048669 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="732bfa03efd7e3cd9c879af00d0bacb9702068f5f399f22beb35030af89d03a4" Nov 24 15:16:46 crc kubenswrapper[5039]: I1124 15:16:46.048730 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-76clq/crc-debug-sxwpr" Nov 24 15:16:46 crc kubenswrapper[5039]: I1124 15:16:46.565935 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-76clq/crc-debug-sxwpr"] Nov 24 15:16:46 crc kubenswrapper[5039]: I1124 15:16:46.574746 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-76clq/crc-debug-sxwpr"] Nov 24 15:16:47 crc kubenswrapper[5039]: I1124 15:16:47.757945 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-76clq/crc-debug-h5k5t"] Nov 24 15:16:47 crc kubenswrapper[5039]: E1124 15:16:47.758691 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24bcdc35-3b2e-4ba2-8aa6-35af84f36118" containerName="container-00" Nov 24 15:16:47 crc kubenswrapper[5039]: I1124 15:16:47.758702 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="24bcdc35-3b2e-4ba2-8aa6-35af84f36118" containerName="container-00" Nov 24 15:16:47 crc kubenswrapper[5039]: I1124 15:16:47.758957 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="24bcdc35-3b2e-4ba2-8aa6-35af84f36118" containerName="container-00" Nov 24 15:16:47 crc kubenswrapper[5039]: I1124 15:16:47.759760 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-76clq/crc-debug-h5k5t" Nov 24 15:16:47 crc kubenswrapper[5039]: I1124 15:16:47.765568 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-76clq"/"default-dockercfg-b4s79" Nov 24 15:16:47 crc kubenswrapper[5039]: I1124 15:16:47.902361 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8408254e-8bf2-4bd0-a5da-6d8556278a6d-host\") pod \"crc-debug-h5k5t\" (UID: \"8408254e-8bf2-4bd0-a5da-6d8556278a6d\") " pod="openshift-must-gather-76clq/crc-debug-h5k5t" Nov 24 15:16:47 crc kubenswrapper[5039]: I1124 15:16:47.902786 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6fhc\" (UniqueName: \"kubernetes.io/projected/8408254e-8bf2-4bd0-a5da-6d8556278a6d-kube-api-access-b6fhc\") pod \"crc-debug-h5k5t\" (UID: \"8408254e-8bf2-4bd0-a5da-6d8556278a6d\") " pod="openshift-must-gather-76clq/crc-debug-h5k5t" Nov 24 15:16:48 crc kubenswrapper[5039]: I1124 15:16:48.004653 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8408254e-8bf2-4bd0-a5da-6d8556278a6d-host\") pod \"crc-debug-h5k5t\" (UID: \"8408254e-8bf2-4bd0-a5da-6d8556278a6d\") " pod="openshift-must-gather-76clq/crc-debug-h5k5t" Nov 24 15:16:48 crc kubenswrapper[5039]: I1124 15:16:48.004800 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8408254e-8bf2-4bd0-a5da-6d8556278a6d-host\") pod \"crc-debug-h5k5t\" (UID: \"8408254e-8bf2-4bd0-a5da-6d8556278a6d\") " pod="openshift-must-gather-76clq/crc-debug-h5k5t" Nov 24 15:16:48 crc kubenswrapper[5039]: I1124 15:16:48.004829 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6fhc\" (UniqueName: \"kubernetes.io/projected/8408254e-8bf2-4bd0-a5da-6d8556278a6d-kube-api-access-b6fhc\") pod \"crc-debug-h5k5t\" (UID: \"8408254e-8bf2-4bd0-a5da-6d8556278a6d\") " pod="openshift-must-gather-76clq/crc-debug-h5k5t" Nov 24 15:16:48 crc kubenswrapper[5039]: I1124 15:16:48.029733 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6fhc\" (UniqueName: \"kubernetes.io/projected/8408254e-8bf2-4bd0-a5da-6d8556278a6d-kube-api-access-b6fhc\") pod \"crc-debug-h5k5t\" (UID: \"8408254e-8bf2-4bd0-a5da-6d8556278a6d\") " pod="openshift-must-gather-76clq/crc-debug-h5k5t" Nov 24 15:16:48 crc kubenswrapper[5039]: I1124 15:16:48.077945 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-76clq/crc-debug-h5k5t" Nov 24 15:16:48 crc kubenswrapper[5039]: I1124 15:16:48.323192 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24bcdc35-3b2e-4ba2-8aa6-35af84f36118" path="/var/lib/kubelet/pods/24bcdc35-3b2e-4ba2-8aa6-35af84f36118/volumes" Nov 24 15:16:49 crc kubenswrapper[5039]: I1124 15:16:49.083728 5039 generic.go:334] "Generic (PLEG): container finished" podID="8408254e-8bf2-4bd0-a5da-6d8556278a6d" containerID="d847f2e71a815758a933e8a2d8865f5290ed4c5e7dc4c09d5c857e9ff8b1f3f7" exitCode=0 Nov 24 15:16:49 crc kubenswrapper[5039]: I1124 15:16:49.083863 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-76clq/crc-debug-h5k5t" event={"ID":"8408254e-8bf2-4bd0-a5da-6d8556278a6d","Type":"ContainerDied","Data":"d847f2e71a815758a933e8a2d8865f5290ed4c5e7dc4c09d5c857e9ff8b1f3f7"} Nov 24 15:16:49 crc kubenswrapper[5039]: I1124 15:16:49.084076 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-76clq/crc-debug-h5k5t" event={"ID":"8408254e-8bf2-4bd0-a5da-6d8556278a6d","Type":"ContainerStarted","Data":"9d63208cc13c6d457f8932937b33119e764097fc632b87f9523cf76d37b29593"} Nov 24 15:16:49 crc kubenswrapper[5039]: I1124 15:16:49.129968 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-76clq/crc-debug-h5k5t"] Nov 24 15:16:49 crc kubenswrapper[5039]: I1124 15:16:49.148683 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-76clq/crc-debug-h5k5t"] Nov 24 15:16:50 crc kubenswrapper[5039]: I1124 15:16:50.236828 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-76clq/crc-debug-h5k5t" Nov 24 15:16:50 crc kubenswrapper[5039]: I1124 15:16:50.357599 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8408254e-8bf2-4bd0-a5da-6d8556278a6d-host\") pod \"8408254e-8bf2-4bd0-a5da-6d8556278a6d\" (UID: \"8408254e-8bf2-4bd0-a5da-6d8556278a6d\") " Nov 24 15:16:50 crc kubenswrapper[5039]: I1124 15:16:50.357715 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b6fhc\" (UniqueName: \"kubernetes.io/projected/8408254e-8bf2-4bd0-a5da-6d8556278a6d-kube-api-access-b6fhc\") pod \"8408254e-8bf2-4bd0-a5da-6d8556278a6d\" (UID: \"8408254e-8bf2-4bd0-a5da-6d8556278a6d\") " Nov 24 15:16:50 crc kubenswrapper[5039]: I1124 15:16:50.357832 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8408254e-8bf2-4bd0-a5da-6d8556278a6d-host" (OuterVolumeSpecName: "host") pod "8408254e-8bf2-4bd0-a5da-6d8556278a6d" (UID: "8408254e-8bf2-4bd0-a5da-6d8556278a6d"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 15:16:50 crc kubenswrapper[5039]: I1124 15:16:50.358591 5039 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8408254e-8bf2-4bd0-a5da-6d8556278a6d-host\") on node \"crc\" DevicePath \"\"" Nov 24 15:16:50 crc kubenswrapper[5039]: I1124 15:16:50.364376 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8408254e-8bf2-4bd0-a5da-6d8556278a6d-kube-api-access-b6fhc" (OuterVolumeSpecName: "kube-api-access-b6fhc") pod "8408254e-8bf2-4bd0-a5da-6d8556278a6d" (UID: "8408254e-8bf2-4bd0-a5da-6d8556278a6d"). InnerVolumeSpecName "kube-api-access-b6fhc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 15:16:50 crc kubenswrapper[5039]: I1124 15:16:50.460826 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b6fhc\" (UniqueName: \"kubernetes.io/projected/8408254e-8bf2-4bd0-a5da-6d8556278a6d-kube-api-access-b6fhc\") on node \"crc\" DevicePath \"\"" Nov 24 15:16:51 crc kubenswrapper[5039]: I1124 15:16:51.111701 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-76clq/crc-debug-h5k5t" Nov 24 15:16:51 crc kubenswrapper[5039]: I1124 15:16:51.111730 5039 scope.go:117] "RemoveContainer" containerID="d847f2e71a815758a933e8a2d8865f5290ed4c5e7dc4c09d5c857e9ff8b1f3f7" Nov 24 15:16:51 crc kubenswrapper[5039]: I1124 15:16:51.307838 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3" Nov 24 15:16:51 crc kubenswrapper[5039]: E1124 15:16:51.308425 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:16:52 crc kubenswrapper[5039]: I1124 15:16:52.334762 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8408254e-8bf2-4bd0-a5da-6d8556278a6d" path="/var/lib/kubelet/pods/8408254e-8bf2-4bd0-a5da-6d8556278a6d/volumes" Nov 24 15:17:06 crc kubenswrapper[5039]: I1124 15:17:06.306708 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3" Nov 24 15:17:06 crc kubenswrapper[5039]: E1124 15:17:06.307628 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:17:17 crc kubenswrapper[5039]: I1124 15:17:17.308082 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3" Nov 24 15:17:17 crc kubenswrapper[5039]: E1124 15:17:17.308857 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:17:28 crc kubenswrapper[5039]: I1124 15:17:28.330196 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3" Nov 24 15:17:28 crc kubenswrapper[5039]: E1124 15:17:28.331286 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:17:34 crc kubenswrapper[5039]: I1124 15:17:34.232552 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_0201ebd4-dc90-4332-b036-38d4d2a1ea2a/aodh-api/0.log" Nov 24 15:17:34 crc kubenswrapper[5039]: I1124 15:17:34.374209 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_0201ebd4-dc90-4332-b036-38d4d2a1ea2a/aodh-evaluator/0.log" Nov 24 15:17:34 crc kubenswrapper[5039]: I1124 15:17:34.476722 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_0201ebd4-dc90-4332-b036-38d4d2a1ea2a/aodh-listener/0.log" Nov 24 15:17:34 crc kubenswrapper[5039]: I1124 15:17:34.513993 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_0201ebd4-dc90-4332-b036-38d4d2a1ea2a/aodh-notifier/0.log" Nov 24 15:17:34 crc kubenswrapper[5039]: I1124 15:17:34.612403 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7c65477b5b-lzp7p_910059fe-375d-443a-8dce-3dd9d0ea7bce/barbican-api/0.log" Nov 24 15:17:34 crc kubenswrapper[5039]: I1124 15:17:34.980559 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7c65477b5b-lzp7p_910059fe-375d-443a-8dce-3dd9d0ea7bce/barbican-api-log/0.log" Nov 24 15:17:35 crc kubenswrapper[5039]: I1124 15:17:35.109086 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-746f5fd69d-mww4x_df3df8f8-f89f-4eab-98af-d7dd6cfe17da/barbican-keystone-listener/0.log" Nov 24 15:17:35 crc kubenswrapper[5039]: I1124 15:17:35.226930 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-746f5fd69d-mww4x_df3df8f8-f89f-4eab-98af-d7dd6cfe17da/barbican-keystone-listener-log/0.log" Nov 24 15:17:35 crc kubenswrapper[5039]: I1124 15:17:35.245037 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7f697665cf-n6vcs_1228dfc2-bfeb-4ba9-b0f8-ac276a2207be/barbican-worker/0.log" Nov 24 15:17:35 crc kubenswrapper[5039]: I1124 15:17:35.394380 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7f697665cf-n6vcs_1228dfc2-bfeb-4ba9-b0f8-ac276a2207be/barbican-worker-log/0.log" Nov 24 15:17:35 crc kubenswrapper[5039]: I1124 15:17:35.470801 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp_1b1f6884-b4f4-4657-a039-930296794fbe/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:17:35 crc kubenswrapper[5039]: I1124 15:17:35.690200 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_066b84eb-20a0-4d2a-b970-6a4419ac3dcc/ceilometer-central-agent/0.log" Nov 24 15:17:35 crc kubenswrapper[5039]: I1124 15:17:35.747693 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_066b84eb-20a0-4d2a-b970-6a4419ac3dcc/ceilometer-notification-agent/0.log" Nov 24 15:17:35 crc kubenswrapper[5039]: I1124 15:17:35.756897 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_066b84eb-20a0-4d2a-b970-6a4419ac3dcc/proxy-httpd/0.log" Nov 24 15:17:35 crc kubenswrapper[5039]: I1124 15:17:35.830154 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_066b84eb-20a0-4d2a-b970-6a4419ac3dcc/sg-core/0.log" Nov 24 15:17:35 crc kubenswrapper[5039]: I1124 15:17:35.998070 5039 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g_c4761a4e-a177-4629-812b-8f940a7c5b98/ceph-client-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:17:36 crc kubenswrapper[5039]: I1124 15:17:36.054088 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8_a735f69f-6248-4a8a-aeed-cfb50b81c9cb/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:17:36 crc kubenswrapper[5039]: I1124 15:17:36.336909 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_d1847d7c-086b-4615-81d8-a6c5e915dcb4/cinder-api/0.log" Nov 24 15:17:36 crc kubenswrapper[5039]: I1124 15:17:36.393914 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_d1847d7c-086b-4615-81d8-a6c5e915dcb4/cinder-api-log/0.log" Nov 24 15:17:36 crc kubenswrapper[5039]: I1124 15:17:36.574332 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_0ed5042d-f435-4adf-aa2b-6c1949957f4c/cinder-backup/0.log" Nov 24 15:17:36 crc kubenswrapper[5039]: I1124 15:17:36.605166 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_0ed5042d-f435-4adf-aa2b-6c1949957f4c/probe/0.log" Nov 24 15:17:36 crc kubenswrapper[5039]: I1124 15:17:36.639679 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_d1395fb6-6223-4aea-9a6d-e743cecd804e/cinder-scheduler/0.log" Nov 24 15:17:36 crc kubenswrapper[5039]: I1124 15:17:36.931386 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_d82cb8c7-3a11-43f9-94a7-63e8a4b824d4/cinder-volume/0.log" Nov 24 15:17:36 crc kubenswrapper[5039]: I1124 15:17:36.935473 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_d1395fb6-6223-4aea-9a6d-e743cecd804e/probe/0.log" Nov 24 15:17:37 crc kubenswrapper[5039]: I1124 15:17:37.009964 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_d82cb8c7-3a11-43f9-94a7-63e8a4b824d4/probe/0.log" Nov 24 15:17:37 crc kubenswrapper[5039]: I1124 15:17:37.196080 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl_3ffbbfde-6e25-49e1-ab24-061d1e90c133/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:17:37 crc kubenswrapper[5039]: I1124 15:17:37.264066 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5_d6370bcf-3557-4e56-9c7b-670a2ec77ec0/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:17:37 crc kubenswrapper[5039]: I1124 15:17:37.595111 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-74cfff99f-ptjrg_f0ced711-f251-4bc4-b59c-4955f950f20d/init/0.log" Nov 24 15:17:37 crc kubenswrapper[5039]: I1124 15:17:37.811493 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-74cfff99f-ptjrg_f0ced711-f251-4bc4-b59c-4955f950f20d/init/0.log" Nov 24 15:17:37 crc kubenswrapper[5039]: I1124 15:17:37.812156 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-74cfff99f-ptjrg_f0ced711-f251-4bc4-b59c-4955f950f20d/dnsmasq-dns/0.log" Nov 24 15:17:37 crc kubenswrapper[5039]: I1124 15:17:37.922027 5039 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_glance-default-external-api-0_8609b6fd-f97e-4af8-811f-c86e99bf033a/glance-httpd/0.log" Nov 24 15:17:38 crc kubenswrapper[5039]: I1124 15:17:38.025035 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_8609b6fd-f97e-4af8-811f-c86e99bf033a/glance-log/0.log" Nov 24 15:17:38 crc kubenswrapper[5039]: I1124 15:17:38.110524 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_6f1ea0e7-3b9c-4fed-85cc-901484aed56f/glance-httpd/0.log" Nov 24 15:17:38 crc kubenswrapper[5039]: I1124 15:17:38.212766 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_6f1ea0e7-3b9c-4fed-85cc-901484aed56f/glance-log/0.log" Nov 24 15:17:38 crc kubenswrapper[5039]: I1124 15:17:38.518027 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-b4696fd89-fd5qp_71066830-9639-4b66-b1c2-cbbc8eb2a821/heat-engine/0.log" Nov 24 15:17:38 crc kubenswrapper[5039]: I1124 15:17:38.965859 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-6fbc854bcb-ssv8l_4a0e58d4-73eb-4baf-8698-4c67b711e1a8/horizon/0.log" Nov 24 15:17:39 crc kubenswrapper[5039]: I1124 15:17:39.142948 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n_0270ae43-26ba-4706-827e-c008cf7ca4fa/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:17:39 crc kubenswrapper[5039]: I1124 15:17:39.378733 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-6b64fd586c-rsg7v_8a04d457-423e-463d-8ea9-35d085150af5/heat-cfnapi/0.log" Nov 24 15:17:39 crc kubenswrapper[5039]: I1124 15:17:39.380633 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-w6l8n_6ae099f1-378b-4de8-a8aa-480a714ccbaf/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:17:39 crc kubenswrapper[5039]: I1124 15:17:39.421306 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-7b9d6d4567-h9q74_daa63fbd-a80c-4690-b49c-e402cb6b3c69/heat-api/0.log" Nov 24 15:17:39 crc kubenswrapper[5039]: I1124 15:17:39.469936 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-6fbc854bcb-ssv8l_4a0e58d4-73eb-4baf-8698-4c67b711e1a8/horizon-log/0.log" Nov 24 15:17:39 crc kubenswrapper[5039]: I1124 15:17:39.661250 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29399881-khmls_5589d33f-8cad-4a38-ae7d-f9611bb8efc5/keystone-cron/0.log" Nov 24 15:17:39 crc kubenswrapper[5039]: I1124 15:17:39.710724 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29399941-srhkf_e88464ce-c201-4ce0-831a-bad31b599341/keystone-cron/0.log" Nov 24 15:17:39 crc kubenswrapper[5039]: I1124 15:17:39.911736 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_07adf2a8-6758-4e5e-b757-6d32eebb1f93/kube-state-metrics/0.log" Nov 24 15:17:40 crc kubenswrapper[5039]: I1124 15:17:40.002297 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-5s78f_09487809-1d9c-44f7-81e0-91d56354f51c/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:17:40 crc kubenswrapper[5039]: I1124 15:17:40.078784 5039 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_keystone-576959578d-mb556_a0a57e07-3e25-4329-9789-c3ff435860c3/keystone-api/0.log" Nov 24 15:17:40 crc kubenswrapper[5039]: I1124 15:17:40.250978 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_logging-edpm-deployment-openstack-edpm-ipam-tpltt_bed151c2-ef33-4571-b779-761a70733f9d/logging-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:17:40 crc kubenswrapper[5039]: I1124 15:17:40.340685 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_92bc16f0-cdd7-4437-aa94-57bf0cd83126/manila-api-log/0.log" Nov 24 15:17:40 crc kubenswrapper[5039]: I1124 15:17:40.352565 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_92bc16f0-cdd7-4437-aa94-57bf0cd83126/manila-api/0.log" Nov 24 15:17:40 crc kubenswrapper[5039]: I1124 15:17:40.746097 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_8f957236-16e6-45c4-8174-b20f69df4ecb/probe/0.log" Nov 24 15:17:40 crc kubenswrapper[5039]: I1124 15:17:40.784296 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_48847c7a-e55f-4a84-8448-89447c762f34/manila-share/0.log" Nov 24 15:17:40 crc kubenswrapper[5039]: I1124 15:17:40.789580 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_8f957236-16e6-45c4-8174-b20f69df4ecb/manila-scheduler/0.log" Nov 24 15:17:40 crc kubenswrapper[5039]: I1124 15:17:40.890304 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_48847c7a-e55f-4a84-8448-89447c762f34/probe/0.log" Nov 24 15:17:41 crc kubenswrapper[5039]: I1124 15:17:41.055415 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mysqld-exporter-0_52ce1afd-e5d8-401a-8fb1-e02b6aff131b/mysqld-exporter/0.log" Nov 24 15:17:41 crc kubenswrapper[5039]: I1124 15:17:41.471901 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7bdd5bd5df-sqgnq_05e189da-8176-4c22-9069-51d7e5f8b867/neutron-httpd/0.log" Nov 24 15:17:41 crc kubenswrapper[5039]: I1124 15:17:41.509797 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7_bb9ef170-1b1b-4027-9ac0-b0e67efda529/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:17:41 crc kubenswrapper[5039]: I1124 15:17:41.512284 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7bdd5bd5df-sqgnq_05e189da-8176-4c22-9069-51d7e5f8b867/neutron-api/0.log" Nov 24 15:17:42 crc kubenswrapper[5039]: I1124 15:17:42.055265 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_62ba09af-0d54-45af-8bed-9c8a1a3661f2/nova-cell0-conductor-conductor/0.log" Nov 24 15:17:42 crc kubenswrapper[5039]: I1124 15:17:42.313528 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3" Nov 24 15:17:42 crc kubenswrapper[5039]: E1124 15:17:42.314816 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:17:42 crc kubenswrapper[5039]: I1124 
15:17:42.394270 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_5894e63c-79c2-42a0-bc65-95f1a69a1525/nova-cell1-conductor-conductor/0.log" Nov 24 15:17:42 crc kubenswrapper[5039]: I1124 15:17:42.461963 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_42be8bb1-8823-4a1f-8777-348baedb7758/nova-api-log/0.log" Nov 24 15:17:42 crc kubenswrapper[5039]: I1124 15:17:42.670798 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_8bcc04e6-8265-45a0-9883-cf6831c72a9c/nova-cell1-novncproxy-novncproxy/0.log" Nov 24 15:17:42 crc kubenswrapper[5039]: I1124 15:17:42.757167 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc_49f0a456-4039-4471-9dd2-c17ea42981e3/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:17:43 crc kubenswrapper[5039]: I1124 15:17:43.045179 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_1c245130-8f33-4226-b312-9573746acd0f/nova-metadata-log/0.log" Nov 24 15:17:43 crc kubenswrapper[5039]: I1124 15:17:43.179767 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_42be8bb1-8823-4a1f-8777-348baedb7758/nova-api-api/0.log" Nov 24 15:17:43 crc kubenswrapper[5039]: I1124 15:17:43.440418 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_04160090-3eab-412c-a6e0-6946a44bcb81/nova-scheduler-scheduler/0.log" Nov 24 15:17:43 crc kubenswrapper[5039]: I1124 15:17:43.478004 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_1dcf47d4-1399-46bb-bda8-5dfeb96a3b60/mysql-bootstrap/0.log" Nov 24 15:17:43 crc kubenswrapper[5039]: I1124 15:17:43.743712 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_1dcf47d4-1399-46bb-bda8-5dfeb96a3b60/galera/0.log" Nov 24 15:17:43 crc kubenswrapper[5039]: I1124 15:17:43.912895 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_1dcf47d4-1399-46bb-bda8-5dfeb96a3b60/mysql-bootstrap/0.log" Nov 24 15:17:44 crc kubenswrapper[5039]: I1124 15:17:44.152949 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_c3dc205b-caf2-45c8-8110-d0f8be91e10f/mysql-bootstrap/0.log" Nov 24 15:17:44 crc kubenswrapper[5039]: I1124 15:17:44.451252 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_c3dc205b-caf2-45c8-8110-d0f8be91e10f/galera/0.log" Nov 24 15:17:44 crc kubenswrapper[5039]: I1124 15:17:44.463893 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_c3dc205b-caf2-45c8-8110-d0f8be91e10f/mysql-bootstrap/0.log" Nov 24 15:17:44 crc kubenswrapper[5039]: I1124 15:17:44.647700 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_bbd0fae4-aa15-46d2-8118-f738c3c1dc3c/openstackclient/0.log" Nov 24 15:17:44 crc kubenswrapper[5039]: I1124 15:17:44.892092 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-5dqj9_4fc86906-5a7c-4bfe-8d23-1c98a8711a4a/ovn-controller/0.log" Nov 24 15:17:45 crc kubenswrapper[5039]: I1124 15:17:45.004070 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-wb4sk_6e986f2a-8ff1-4efb-aff0-7e294c0845bf/openstack-network-exporter/0.log" Nov 24 15:17:45 crc 
kubenswrapper[5039]: I1124 15:17:45.199673 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2cfx8_cfe8f618-f843-4051-9491-cb3d06e1a1bc/ovsdb-server-init/0.log" Nov 24 15:17:45 crc kubenswrapper[5039]: I1124 15:17:45.427673 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2cfx8_cfe8f618-f843-4051-9491-cb3d06e1a1bc/ovs-vswitchd/0.log" Nov 24 15:17:45 crc kubenswrapper[5039]: I1124 15:17:45.431677 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2cfx8_cfe8f618-f843-4051-9491-cb3d06e1a1bc/ovsdb-server-init/0.log" Nov 24 15:17:45 crc kubenswrapper[5039]: I1124 15:17:45.480637 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2cfx8_cfe8f618-f843-4051-9491-cb3d06e1a1bc/ovsdb-server/0.log" Nov 24 15:17:45 crc kubenswrapper[5039]: I1124 15:17:45.678804 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-h8rn4_a1fa909b-2535-405a-9969-fc0ca9ff77fc/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:17:45 crc kubenswrapper[5039]: I1124 15:17:45.846347 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_1c245130-8f33-4226-b312-9573746acd0f/nova-metadata-metadata/0.log" Nov 24 15:17:45 crc kubenswrapper[5039]: I1124 15:17:45.881417 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_9523b0b0-e489-4eb8-8954-83dd766373df/openstack-network-exporter/0.log" Nov 24 15:17:46 crc kubenswrapper[5039]: I1124 15:17:46.004832 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_9523b0b0-e489-4eb8-8954-83dd766373df/ovn-northd/0.log" Nov 24 15:17:46 crc kubenswrapper[5039]: I1124 15:17:46.100996 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9/openstack-network-exporter/0.log" Nov 24 15:17:46 crc kubenswrapper[5039]: I1124 15:17:46.132378 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9/ovsdbserver-nb/0.log" Nov 24 15:17:46 crc kubenswrapper[5039]: I1124 15:17:46.277879 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_a5a69761-eccd-49e6-8749-86142600d287/openstack-network-exporter/0.log" Nov 24 15:17:46 crc kubenswrapper[5039]: I1124 15:17:46.311726 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_a5a69761-eccd-49e6-8749-86142600d287/ovsdbserver-sb/0.log" Nov 24 15:17:46 crc kubenswrapper[5039]: I1124 15:17:46.649658 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_54819035-007f-4162-9419-d825f50e1ce9/init-config-reloader/0.log" Nov 24 15:17:46 crc kubenswrapper[5039]: I1124 15:17:46.686262 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-66cb4657dd-z97bx_7cd31c1b-3250-444a-a717-88349d2c57a0/placement-api/0.log" Nov 24 15:17:46 crc kubenswrapper[5039]: I1124 15:17:46.715153 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-66cb4657dd-z97bx_7cd31c1b-3250-444a-a717-88349d2c57a0/placement-log/0.log" Nov 24 15:17:46 crc kubenswrapper[5039]: I1124 15:17:46.884235 5039 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_prometheus-metric-storage-0_54819035-007f-4162-9419-d825f50e1ce9/init-config-reloader/0.log" Nov 24 15:17:46 crc kubenswrapper[5039]: I1124 15:17:46.963446 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_54819035-007f-4162-9419-d825f50e1ce9/config-reloader/0.log" Nov 24 15:17:46 crc kubenswrapper[5039]: I1124 15:17:46.973074 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_54819035-007f-4162-9419-d825f50e1ce9/prometheus/0.log" Nov 24 15:17:46 crc kubenswrapper[5039]: I1124 15:17:46.985575 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_54819035-007f-4162-9419-d825f50e1ce9/thanos-sidecar/0.log" Nov 24 15:17:47 crc kubenswrapper[5039]: I1124 15:17:47.197909 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c2b248b0-d5b6-4800-9f0a-915f03d73696/setup-container/0.log" Nov 24 15:17:47 crc kubenswrapper[5039]: I1124 15:17:47.617596 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c2b248b0-d5b6-4800-9f0a-915f03d73696/setup-container/0.log" Nov 24 15:17:47 crc kubenswrapper[5039]: I1124 15:17:47.682413 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c2b248b0-d5b6-4800-9f0a-915f03d73696/rabbitmq/0.log" Nov 24 15:17:47 crc kubenswrapper[5039]: I1124 15:17:47.715446 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_b820e90e-779c-4300-b0e0-affe5118e73f/setup-container/0.log" Nov 24 15:17:47 crc kubenswrapper[5039]: I1124 15:17:47.990309 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_b820e90e-779c-4300-b0e0-affe5118e73f/setup-container/0.log" Nov 24 15:17:47 crc kubenswrapper[5039]: I1124 15:17:47.993723 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m_76784bc3-6a6c-4ecc-8799-daffb50a9ca3/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:17:48 crc kubenswrapper[5039]: I1124 15:17:48.009402 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_b820e90e-779c-4300-b0e0-affe5118e73f/rabbitmq/0.log" Nov 24 15:17:48 crc kubenswrapper[5039]: I1124 15:17:48.237254 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj_c2bf2f79-d7fa-47ca-a8fc-b48d77875208/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:17:48 crc kubenswrapper[5039]: I1124 15:17:48.347575 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-hnwls_7ef0c61d-e6e4-49d2-949c-ed412b59186f/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:17:48 crc kubenswrapper[5039]: I1124 15:17:48.489304 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-bqd9r_20f1c3b8-744c-45ab-b82c-07fab5659614/ssh-known-hosts-edpm-deployment/0.log" Nov 24 15:17:48 crc kubenswrapper[5039]: I1124 15:17:48.811754 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-5b66587b55-thzjl_bd1bf6a5-309b-4960-8f37-34b006db3599/proxy-server/0.log" Nov 24 15:17:48 crc kubenswrapper[5039]: I1124 15:17:48.893835 5039 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-ring-rebalance-vnskv_d7a6efff-c0ad-43c3-999c-d4840d3c5825/swift-ring-rebalance/0.log" Nov 24 15:17:48 crc kubenswrapper[5039]: I1124 15:17:48.901159 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-5b66587b55-thzjl_bd1bf6a5-309b-4960-8f37-34b006db3599/proxy-httpd/0.log" Nov 24 15:17:49 crc kubenswrapper[5039]: I1124 15:17:49.074683 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/account-auditor/0.log" Nov 24 15:17:49 crc kubenswrapper[5039]: I1124 15:17:49.141159 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/account-reaper/0.log" Nov 24 15:17:49 crc kubenswrapper[5039]: I1124 15:17:49.157920 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/account-replicator/0.log" Nov 24 15:17:49 crc kubenswrapper[5039]: I1124 15:17:49.307216 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/container-auditor/0.log" Nov 24 15:17:49 crc kubenswrapper[5039]: I1124 15:17:49.360702 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/container-server/0.log" Nov 24 15:17:49 crc kubenswrapper[5039]: I1124 15:17:49.399036 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/account-server/0.log" Nov 24 15:17:49 crc kubenswrapper[5039]: I1124 15:17:49.403862 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/container-replicator/0.log" Nov 24 15:17:49 crc kubenswrapper[5039]: I1124 15:17:49.504047 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/container-updater/0.log" Nov 24 15:17:49 crc kubenswrapper[5039]: I1124 15:17:49.622250 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/object-auditor/0.log" Nov 24 15:17:49 crc kubenswrapper[5039]: I1124 15:17:49.664418 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/object-expirer/0.log" Nov 24 15:17:49 crc kubenswrapper[5039]: I1124 15:17:49.690238 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/object-replicator/0.log" Nov 24 15:17:49 crc kubenswrapper[5039]: I1124 15:17:49.758286 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/object-server/0.log" Nov 24 15:17:49 crc kubenswrapper[5039]: I1124 15:17:49.864135 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/object-updater/0.log" Nov 24 15:17:49 crc kubenswrapper[5039]: I1124 15:17:49.924551 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/rsync/0.log" Nov 24 15:17:49 crc kubenswrapper[5039]: I1124 15:17:49.950741 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/swift-recon-cron/0.log" Nov 24 15:17:50 
crc kubenswrapper[5039]: I1124 15:17:50.241102 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2_d1d48eba-5a90-4ca3-b298-f19175f93608/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:17:50 crc kubenswrapper[5039]: I1124 15:17:50.262992 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5_d8589776-bb1f-42ea-8bfa-7053520c66b7/telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:17:50 crc kubenswrapper[5039]: I1124 15:17:50.536569 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_2c05319a-d5d8-4585-8d73-0bc049535803/test-operator-logs-container/0.log" Nov 24 15:17:50 crc kubenswrapper[5039]: I1124 15:17:50.786919 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7_f9911acb-6e34-497c-9346-18b3299f63be/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:17:51 crc kubenswrapper[5039]: I1124 15:17:51.510663 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_e515a4f0-d838-4d61-906b-f26a0c07f8c8/tempest-tests-tempest-tests-runner/0.log" Nov 24 15:17:54 crc kubenswrapper[5039]: I1124 15:17:54.314286 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3" Nov 24 15:17:54 crc kubenswrapper[5039]: E1124 15:17:54.316143 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:18:02 crc kubenswrapper[5039]: I1124 15:18:02.652427 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_3cef0b8e-d050-4055-a798-31b108727299/memcached/0.log" Nov 24 15:18:09 crc kubenswrapper[5039]: I1124 15:18:09.306986 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3" Nov 24 15:18:09 crc kubenswrapper[5039]: E1124 15:18:09.307838 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:18:19 crc kubenswrapper[5039]: I1124 15:18:19.475840 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tctwl"] Nov 24 15:18:19 crc kubenswrapper[5039]: E1124 15:18:19.476811 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8408254e-8bf2-4bd0-a5da-6d8556278a6d" containerName="container-00" Nov 24 15:18:19 crc kubenswrapper[5039]: I1124 15:18:19.476824 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="8408254e-8bf2-4bd0-a5da-6d8556278a6d" containerName="container-00" Nov 24 15:18:19 crc kubenswrapper[5039]: I1124 15:18:19.477052 5039 
memory_manager.go:354] "RemoveStaleState removing state" podUID="8408254e-8bf2-4bd0-a5da-6d8556278a6d" containerName="container-00" Nov 24 15:18:19 crc kubenswrapper[5039]: I1124 15:18:19.478678 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tctwl" Nov 24 15:18:19 crc kubenswrapper[5039]: I1124 15:18:19.503062 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tctwl"] Nov 24 15:18:19 crc kubenswrapper[5039]: I1124 15:18:19.511859 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnm6q\" (UniqueName: \"kubernetes.io/projected/4179b75e-befe-4e91-97d8-3f4fa5e9628e-kube-api-access-xnm6q\") pod \"redhat-marketplace-tctwl\" (UID: \"4179b75e-befe-4e91-97d8-3f4fa5e9628e\") " pod="openshift-marketplace/redhat-marketplace-tctwl" Nov 24 15:18:19 crc kubenswrapper[5039]: I1124 15:18:19.511992 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4179b75e-befe-4e91-97d8-3f4fa5e9628e-utilities\") pod \"redhat-marketplace-tctwl\" (UID: \"4179b75e-befe-4e91-97d8-3f4fa5e9628e\") " pod="openshift-marketplace/redhat-marketplace-tctwl" Nov 24 15:18:19 crc kubenswrapper[5039]: I1124 15:18:19.512013 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4179b75e-befe-4e91-97d8-3f4fa5e9628e-catalog-content\") pod \"redhat-marketplace-tctwl\" (UID: \"4179b75e-befe-4e91-97d8-3f4fa5e9628e\") " pod="openshift-marketplace/redhat-marketplace-tctwl" Nov 24 15:18:19 crc kubenswrapper[5039]: I1124 15:18:19.613992 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4179b75e-befe-4e91-97d8-3f4fa5e9628e-utilities\") pod \"redhat-marketplace-tctwl\" (UID: \"4179b75e-befe-4e91-97d8-3f4fa5e9628e\") " pod="openshift-marketplace/redhat-marketplace-tctwl" Nov 24 15:18:19 crc kubenswrapper[5039]: I1124 15:18:19.614047 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4179b75e-befe-4e91-97d8-3f4fa5e9628e-catalog-content\") pod \"redhat-marketplace-tctwl\" (UID: \"4179b75e-befe-4e91-97d8-3f4fa5e9628e\") " pod="openshift-marketplace/redhat-marketplace-tctwl" Nov 24 15:18:19 crc kubenswrapper[5039]: I1124 15:18:19.614177 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnm6q\" (UniqueName: \"kubernetes.io/projected/4179b75e-befe-4e91-97d8-3f4fa5e9628e-kube-api-access-xnm6q\") pod \"redhat-marketplace-tctwl\" (UID: \"4179b75e-befe-4e91-97d8-3f4fa5e9628e\") " pod="openshift-marketplace/redhat-marketplace-tctwl" Nov 24 15:18:19 crc kubenswrapper[5039]: I1124 15:18:19.615063 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4179b75e-befe-4e91-97d8-3f4fa5e9628e-utilities\") pod \"redhat-marketplace-tctwl\" (UID: \"4179b75e-befe-4e91-97d8-3f4fa5e9628e\") " pod="openshift-marketplace/redhat-marketplace-tctwl" Nov 24 15:18:19 crc kubenswrapper[5039]: I1124 15:18:19.615182 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4179b75e-befe-4e91-97d8-3f4fa5e9628e-catalog-content\") pod 
\"redhat-marketplace-tctwl\" (UID: \"4179b75e-befe-4e91-97d8-3f4fa5e9628e\") " pod="openshift-marketplace/redhat-marketplace-tctwl" Nov 24 15:18:19 crc kubenswrapper[5039]: I1124 15:18:19.636843 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnm6q\" (UniqueName: \"kubernetes.io/projected/4179b75e-befe-4e91-97d8-3f4fa5e9628e-kube-api-access-xnm6q\") pod \"redhat-marketplace-tctwl\" (UID: \"4179b75e-befe-4e91-97d8-3f4fa5e9628e\") " pod="openshift-marketplace/redhat-marketplace-tctwl" Nov 24 15:18:19 crc kubenswrapper[5039]: I1124 15:18:19.803729 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tctwl" Nov 24 15:18:20 crc kubenswrapper[5039]: I1124 15:18:20.471760 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2hsdg"] Nov 24 15:18:20 crc kubenswrapper[5039]: I1124 15:18:20.474352 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2hsdg" Nov 24 15:18:20 crc kubenswrapper[5039]: I1124 15:18:20.504888 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2hsdg"] Nov 24 15:18:20 crc kubenswrapper[5039]: I1124 15:18:20.519073 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tctwl"] Nov 24 15:18:20 crc kubenswrapper[5039]: I1124 15:18:20.535795 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mwk6\" (UniqueName: \"kubernetes.io/projected/71436ae2-8c4f-46bd-b877-b93ab84dbdac-kube-api-access-2mwk6\") pod \"certified-operators-2hsdg\" (UID: \"71436ae2-8c4f-46bd-b877-b93ab84dbdac\") " pod="openshift-marketplace/certified-operators-2hsdg" Nov 24 15:18:20 crc kubenswrapper[5039]: I1124 15:18:20.535985 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71436ae2-8c4f-46bd-b877-b93ab84dbdac-utilities\") pod \"certified-operators-2hsdg\" (UID: \"71436ae2-8c4f-46bd-b877-b93ab84dbdac\") " pod="openshift-marketplace/certified-operators-2hsdg" Nov 24 15:18:20 crc kubenswrapper[5039]: I1124 15:18:20.536132 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71436ae2-8c4f-46bd-b877-b93ab84dbdac-catalog-content\") pod \"certified-operators-2hsdg\" (UID: \"71436ae2-8c4f-46bd-b877-b93ab84dbdac\") " pod="openshift-marketplace/certified-operators-2hsdg" Nov 24 15:18:20 crc kubenswrapper[5039]: I1124 15:18:20.637946 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71436ae2-8c4f-46bd-b877-b93ab84dbdac-utilities\") pod \"certified-operators-2hsdg\" (UID: \"71436ae2-8c4f-46bd-b877-b93ab84dbdac\") " pod="openshift-marketplace/certified-operators-2hsdg" Nov 24 15:18:20 crc kubenswrapper[5039]: I1124 15:18:20.638273 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71436ae2-8c4f-46bd-b877-b93ab84dbdac-catalog-content\") pod \"certified-operators-2hsdg\" (UID: \"71436ae2-8c4f-46bd-b877-b93ab84dbdac\") " pod="openshift-marketplace/certified-operators-2hsdg" Nov 24 15:18:20 crc kubenswrapper[5039]: I1124 15:18:20.638335 5039 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-2mwk6\" (UniqueName: \"kubernetes.io/projected/71436ae2-8c4f-46bd-b877-b93ab84dbdac-kube-api-access-2mwk6\") pod \"certified-operators-2hsdg\" (UID: \"71436ae2-8c4f-46bd-b877-b93ab84dbdac\") " pod="openshift-marketplace/certified-operators-2hsdg" Nov 24 15:18:20 crc kubenswrapper[5039]: I1124 15:18:20.638696 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71436ae2-8c4f-46bd-b877-b93ab84dbdac-utilities\") pod \"certified-operators-2hsdg\" (UID: \"71436ae2-8c4f-46bd-b877-b93ab84dbdac\") " pod="openshift-marketplace/certified-operators-2hsdg" Nov 24 15:18:20 crc kubenswrapper[5039]: I1124 15:18:20.639143 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71436ae2-8c4f-46bd-b877-b93ab84dbdac-catalog-content\") pod \"certified-operators-2hsdg\" (UID: \"71436ae2-8c4f-46bd-b877-b93ab84dbdac\") " pod="openshift-marketplace/certified-operators-2hsdg" Nov 24 15:18:20 crc kubenswrapper[5039]: I1124 15:18:20.660355 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mwk6\" (UniqueName: \"kubernetes.io/projected/71436ae2-8c4f-46bd-b877-b93ab84dbdac-kube-api-access-2mwk6\") pod \"certified-operators-2hsdg\" (UID: \"71436ae2-8c4f-46bd-b877-b93ab84dbdac\") " pod="openshift-marketplace/certified-operators-2hsdg" Nov 24 15:18:20 crc kubenswrapper[5039]: I1124 15:18:20.877066 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2hsdg" Nov 24 15:18:21 crc kubenswrapper[5039]: I1124 15:18:21.191145 5039 generic.go:334] "Generic (PLEG): container finished" podID="4179b75e-befe-4e91-97d8-3f4fa5e9628e" containerID="9a39f149599b22aa451a68994470a48f8020a52cddb9a363b0cd380578262fe0" exitCode=0 Nov 24 15:18:21 crc kubenswrapper[5039]: I1124 15:18:21.191346 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tctwl" event={"ID":"4179b75e-befe-4e91-97d8-3f4fa5e9628e","Type":"ContainerDied","Data":"9a39f149599b22aa451a68994470a48f8020a52cddb9a363b0cd380578262fe0"} Nov 24 15:18:21 crc kubenswrapper[5039]: I1124 15:18:21.191415 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tctwl" event={"ID":"4179b75e-befe-4e91-97d8-3f4fa5e9628e","Type":"ContainerStarted","Data":"7a7020077222d0d5860534771ab7e95c5026f56f4c972262e2fbc346ccee7f3d"} Nov 24 15:18:21 crc kubenswrapper[5039]: I1124 15:18:21.411846 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2hsdg"] Nov 24 15:18:21 crc kubenswrapper[5039]: W1124 15:18:21.416237 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71436ae2_8c4f_46bd_b877_b93ab84dbdac.slice/crio-321b99e30becc48c75d22af3615c5f3d6c48a5efee7ba4f53ff6184d33fa6f87 WatchSource:0}: Error finding container 321b99e30becc48c75d22af3615c5f3d6c48a5efee7ba4f53ff6184d33fa6f87: Status 404 returned error can't find the container with id 321b99e30becc48c75d22af3615c5f3d6c48a5efee7ba4f53ff6184d33fa6f87 Nov 24 15:18:22 crc kubenswrapper[5039]: I1124 15:18:22.201366 5039 generic.go:334] "Generic (PLEG): container finished" podID="71436ae2-8c4f-46bd-b877-b93ab84dbdac" containerID="11ce6f44faafa146b091ad1e719584293cfc8c376acc5648bc5ce4dcdcd6bbb5" exitCode=0 Nov 24 
15:18:22 crc kubenswrapper[5039]: I1124 15:18:22.201414 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hsdg" event={"ID":"71436ae2-8c4f-46bd-b877-b93ab84dbdac","Type":"ContainerDied","Data":"11ce6f44faafa146b091ad1e719584293cfc8c376acc5648bc5ce4dcdcd6bbb5"} Nov 24 15:18:22 crc kubenswrapper[5039]: I1124 15:18:22.201752 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hsdg" event={"ID":"71436ae2-8c4f-46bd-b877-b93ab84dbdac","Type":"ContainerStarted","Data":"321b99e30becc48c75d22af3615c5f3d6c48a5efee7ba4f53ff6184d33fa6f87"} Nov 24 15:18:22 crc kubenswrapper[5039]: I1124 15:18:22.204082 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tctwl" event={"ID":"4179b75e-befe-4e91-97d8-3f4fa5e9628e","Type":"ContainerStarted","Data":"abc3f2233be19eafe7cfabeeeb3e5863837aeaf1b399edba17252495a634e88a"} Nov 24 15:18:22 crc kubenswrapper[5039]: I1124 15:18:22.484652 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-sgqwz_b913096c-9ece-4755-9545-0116fbc53123/kube-rbac-proxy/0.log" Nov 24 15:18:22 crc kubenswrapper[5039]: I1124 15:18:22.586151 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-sgqwz_b913096c-9ece-4755-9545-0116fbc53123/manager/0.log" Nov 24 15:18:22 crc kubenswrapper[5039]: I1124 15:18:22.755876 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-79856dc55c-whklh_396e7965-a743-4028-989b-e3610abb5a3a/kube-rbac-proxy/0.log" Nov 24 15:18:22 crc kubenswrapper[5039]: I1124 15:18:22.792369 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-79856dc55c-whklh_396e7965-a743-4028-989b-e3610abb5a3a/manager/0.log" Nov 24 15:18:22 crc kubenswrapper[5039]: I1124 15:18:22.975935 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-6nwfx_99f1711f-1dd9-471d-9a2d-8c6e0a46fb0d/kube-rbac-proxy/0.log" Nov 24 15:18:23 crc kubenswrapper[5039]: I1124 15:18:23.024687 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-6nwfx_99f1711f-1dd9-471d-9a2d-8c6e0a46fb0d/manager/0.log" Nov 24 15:18:23 crc kubenswrapper[5039]: I1124 15:18:23.101436 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b_0dc29372-3c0b-496e-b027-e57abc3ca956/util/0.log" Nov 24 15:18:23 crc kubenswrapper[5039]: I1124 15:18:23.215374 5039 generic.go:334] "Generic (PLEG): container finished" podID="4179b75e-befe-4e91-97d8-3f4fa5e9628e" containerID="abc3f2233be19eafe7cfabeeeb3e5863837aeaf1b399edba17252495a634e88a" exitCode=0 Nov 24 15:18:23 crc kubenswrapper[5039]: I1124 15:18:23.215432 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tctwl" event={"ID":"4179b75e-befe-4e91-97d8-3f4fa5e9628e","Type":"ContainerDied","Data":"abc3f2233be19eafe7cfabeeeb3e5863837aeaf1b399edba17252495a634e88a"} Nov 24 15:18:23 crc kubenswrapper[5039]: I1124 15:18:23.319030 5039 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b_0dc29372-3c0b-496e-b027-e57abc3ca956/pull/0.log" Nov 24 15:18:23 crc kubenswrapper[5039]: I1124 15:18:23.343130 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b_0dc29372-3c0b-496e-b027-e57abc3ca956/util/0.log" Nov 24 15:18:23 crc kubenswrapper[5039]: I1124 15:18:23.376916 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b_0dc29372-3c0b-496e-b027-e57abc3ca956/pull/0.log" Nov 24 15:18:23 crc kubenswrapper[5039]: I1124 15:18:23.533205 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b_0dc29372-3c0b-496e-b027-e57abc3ca956/pull/0.log" Nov 24 15:18:23 crc kubenswrapper[5039]: I1124 15:18:23.606183 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b_0dc29372-3c0b-496e-b027-e57abc3ca956/util/0.log" Nov 24 15:18:23 crc kubenswrapper[5039]: I1124 15:18:23.626837 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b_0dc29372-3c0b-496e-b027-e57abc3ca956/extract/0.log" Nov 24 15:18:23 crc kubenswrapper[5039]: I1124 15:18:23.780646 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-l8cvk_0067c9ac-5dfc-4e0d-b316-161e02698ffd/kube-rbac-proxy/0.log" Nov 24 15:18:24 crc kubenswrapper[5039]: I1124 15:18:24.237957 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tctwl" event={"ID":"4179b75e-befe-4e91-97d8-3f4fa5e9628e","Type":"ContainerStarted","Data":"c3e62ef071581329751726008c04f0b7b6ef99c40ec9254ccd8e9eefc369bd84"} Nov 24 15:18:24 crc kubenswrapper[5039]: I1124 15:18:24.259995 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-tctwl" podStartSLOduration=2.567075095 podStartE2EDuration="5.25997581s" podCreationTimestamp="2025-11-24 15:18:19 +0000 UTC" firstStartedPulling="2025-11-24 15:18:21.194645575 +0000 UTC m=+7213.633770075" lastFinishedPulling="2025-11-24 15:18:23.88754629 +0000 UTC m=+7216.326670790" observedRunningTime="2025-11-24 15:18:24.259856448 +0000 UTC m=+7216.698980948" watchObservedRunningTime="2025-11-24 15:18:24.25997581 +0000 UTC m=+7216.699100310" Nov 24 15:18:24 crc kubenswrapper[5039]: I1124 15:18:24.308939 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3" Nov 24 15:18:24 crc kubenswrapper[5039]: E1124 15:18:24.309206 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:18:24 crc kubenswrapper[5039]: I1124 15:18:24.332337 5039 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-l8cvk_0067c9ac-5dfc-4e0d-b316-161e02698ffd/manager/0.log" Nov 24 15:18:24 crc kubenswrapper[5039]: I1124 15:18:24.441877 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-bgn7g_bea08559-78a5-4287-85e8-a83768d94670/manager/0.log" Nov 24 15:18:24 crc kubenswrapper[5039]: I1124 15:18:24.572082 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-bgn7g_bea08559-78a5-4287-85e8-a83768d94670/kube-rbac-proxy/0.log" Nov 24 15:18:24 crc kubenswrapper[5039]: I1124 15:18:24.578327 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-69pxs_eee87172-9357-412c-8eb2-7df01649f1d0/kube-rbac-proxy/0.log" Nov 24 15:18:24 crc kubenswrapper[5039]: I1124 15:18:24.814022 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-69pxs_eee87172-9357-412c-8eb2-7df01649f1d0/manager/0.log" Nov 24 15:18:24 crc kubenswrapper[5039]: I1124 15:18:24.937475 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-j7gmp_bd656299-f7da-4ca8-aee9-25c389243cc9/kube-rbac-proxy/0.log" Nov 24 15:18:25 crc kubenswrapper[5039]: I1124 15:18:25.177199 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-j7gmp_bd656299-f7da-4ca8-aee9-25c389243cc9/manager/0.log" Nov 24 15:18:25 crc kubenswrapper[5039]: I1124 15:18:25.363275 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-68gjk_35f14195-18aa-433d-8705-1aa24a8a1818/kube-rbac-proxy/0.log" Nov 24 15:18:25 crc kubenswrapper[5039]: I1124 15:18:25.545712 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-68gjk_35f14195-18aa-433d-8705-1aa24a8a1818/manager/0.log" Nov 24 15:18:25 crc kubenswrapper[5039]: I1124 15:18:25.602140 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-2lmlh_29991995-423a-42c0-ae52-2b3c160a3e0c/kube-rbac-proxy/0.log" Nov 24 15:18:25 crc kubenswrapper[5039]: I1124 15:18:25.847192 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-2lmlh_29991995-423a-42c0-ae52-2b3c160a3e0c/manager/0.log" Nov 24 15:18:25 crc kubenswrapper[5039]: I1124 15:18:25.956726 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-nng76_979a5bac-57c9-4d42-9af6-11228e980f7f/kube-rbac-proxy/0.log" Nov 24 15:18:26 crc kubenswrapper[5039]: I1124 15:18:26.001362 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-nng76_979a5bac-57c9-4d42-9af6-11228e980f7f/manager/0.log" Nov 24 15:18:26 crc kubenswrapper[5039]: I1124 15:18:26.185694 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-phdb6_3c29a4a4-1d0c-4a1f-a4b5-a67cb564707a/kube-rbac-proxy/0.log" Nov 24 15:18:26 crc kubenswrapper[5039]: I1124 15:18:26.268886 5039 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-phdb6_3c29a4a4-1d0c-4a1f-a4b5-a67cb564707a/manager/0.log" Nov 24 15:18:26 crc kubenswrapper[5039]: I1124 15:18:26.416429 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-rvq7d_edf2350a-f77f-45ec-87c1-35f7b38ddcb3/kube-rbac-proxy/0.log" Nov 24 15:18:26 crc kubenswrapper[5039]: I1124 15:18:26.455977 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-rvq7d_edf2350a-f77f-45ec-87c1-35f7b38ddcb3/manager/0.log" Nov 24 15:18:26 crc kubenswrapper[5039]: I1124 15:18:26.535793 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-nwsck_98b88919-04d2-4c01-b45a-dd72afbbe179/kube-rbac-proxy/0.log" Nov 24 15:18:26 crc kubenswrapper[5039]: I1124 15:18:26.687788 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-nwsck_98b88919-04d2-4c01-b45a-dd72afbbe179/manager/0.log" Nov 24 15:18:26 crc kubenswrapper[5039]: I1124 15:18:26.818746 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-88sqn_e97ae0ee-d044-4b9d-a371-eec59a5ff932/manager/0.log" Nov 24 15:18:26 crc kubenswrapper[5039]: I1124 15:18:26.859791 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-88sqn_e97ae0ee-d044-4b9d-a371-eec59a5ff932/kube-rbac-proxy/0.log" Nov 24 15:18:26 crc kubenswrapper[5039]: I1124 15:18:26.936086 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb_3731fd87-4c6a-4fb0-a3d5-cf48e76a5448/kube-rbac-proxy/0.log" Nov 24 15:18:27 crc kubenswrapper[5039]: I1124 15:18:27.018251 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb_3731fd87-4c6a-4fb0-a3d5-cf48e76a5448/manager/0.log" Nov 24 15:18:27 crc kubenswrapper[5039]: I1124 15:18:27.420624 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-lg47r_30a0cc80-e6f2-48b0-9469-32d5d397c0aa/registry-server/0.log" Nov 24 15:18:27 crc kubenswrapper[5039]: I1124 15:18:27.490864 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-6ccdcd8b77-9k4cr_74fee1bc-d496-4e8d-9884-ce1b67a00e75/operator/0.log" Nov 24 15:18:27 crc kubenswrapper[5039]: I1124 15:18:27.516216 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-66cf5c67ff-ft57m_058dcaa2-f18f-4eff-bfd1-d290a8fd36a1/kube-rbac-proxy/0.log" Nov 24 15:18:27 crc kubenswrapper[5039]: I1124 15:18:27.712068 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-66cf5c67ff-ft57m_058dcaa2-f18f-4eff-bfd1-d290a8fd36a1/manager/0.log" Nov 24 15:18:27 crc kubenswrapper[5039]: I1124 15:18:27.736389 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-hwn8q_865f4099-70b9-45a1-9bcd-c92882c9aab1/kube-rbac-proxy/0.log" Nov 24 15:18:27 crc kubenswrapper[5039]: I1124 15:18:27.800205 5039 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-hwn8q_865f4099-70b9-45a1-9bcd-c92882c9aab1/manager/0.log" Nov 24 15:18:28 crc kubenswrapper[5039]: I1124 15:18:28.074802 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-j9qqq_076a99d2-27b3-4d08-bdcc-876e1dec4f5f/operator/0.log" Nov 24 15:18:28 crc kubenswrapper[5039]: I1124 15:18:28.078601 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-qzwtb_90ff7526-7243-45b2-afaa-ee39dff42b46/kube-rbac-proxy/0.log" Nov 24 15:18:28 crc kubenswrapper[5039]: I1124 15:18:28.200754 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-qzwtb_90ff7526-7243-45b2-afaa-ee39dff42b46/manager/0.log" Nov 24 15:18:28 crc kubenswrapper[5039]: I1124 15:18:28.282108 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-bf6985ffc-g86nb_7f6bb6a1-8df6-4d15-8d27-a5bbc28b9b31/kube-rbac-proxy/0.log" Nov 24 15:18:28 crc kubenswrapper[5039]: I1124 15:18:28.485991 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cb74df96-qk6bz_6cbb9e3e-f545-4d83-aee4-8e122c54437c/kube-rbac-proxy/0.log" Nov 24 15:18:28 crc kubenswrapper[5039]: I1124 15:18:28.522212 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cb74df96-qk6bz_6cbb9e3e-f545-4d83-aee4-8e122c54437c/manager/0.log" Nov 24 15:18:28 crc kubenswrapper[5039]: I1124 15:18:28.783170 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-bf6985ffc-g86nb_7f6bb6a1-8df6-4d15-8d27-a5bbc28b9b31/manager/0.log" Nov 24 15:18:28 crc kubenswrapper[5039]: I1124 15:18:28.953988 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7b5fb95979-n45b6_0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a/manager/0.log" Nov 24 15:18:28 crc kubenswrapper[5039]: I1124 15:18:28.981377 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-kg5jg_ab34fb1d-70af-4438-86c7-3856f1733097/kube-rbac-proxy/0.log" Nov 24 15:18:28 crc kubenswrapper[5039]: I1124 15:18:28.995861 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-kg5jg_ab34fb1d-70af-4438-86c7-3856f1733097/manager/0.log" Nov 24 15:18:29 crc kubenswrapper[5039]: I1124 15:18:29.804081 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-tctwl" Nov 24 15:18:29 crc kubenswrapper[5039]: I1124 15:18:29.804125 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-tctwl" Nov 24 15:18:29 crc kubenswrapper[5039]: I1124 15:18:29.856303 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-tctwl" Nov 24 15:18:30 crc kubenswrapper[5039]: I1124 15:18:30.409452 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-tctwl" Nov 24 15:18:30 crc kubenswrapper[5039]: I1124 15:18:30.455148 5039 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tctwl"] Nov 24 15:18:31 crc kubenswrapper[5039]: I1124 15:18:31.362568 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hsdg" event={"ID":"71436ae2-8c4f-46bd-b877-b93ab84dbdac","Type":"ContainerStarted","Data":"afe3b5389e338e3bf63968aa60babd14c851aca591e34ecddff922556417e506"} Nov 24 15:18:32 crc kubenswrapper[5039]: I1124 15:18:32.372086 5039 generic.go:334] "Generic (PLEG): container finished" podID="71436ae2-8c4f-46bd-b877-b93ab84dbdac" containerID="afe3b5389e338e3bf63968aa60babd14c851aca591e34ecddff922556417e506" exitCode=0 Nov 24 15:18:32 crc kubenswrapper[5039]: I1124 15:18:32.372176 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hsdg" event={"ID":"71436ae2-8c4f-46bd-b877-b93ab84dbdac","Type":"ContainerDied","Data":"afe3b5389e338e3bf63968aa60babd14c851aca591e34ecddff922556417e506"} Nov 24 15:18:32 crc kubenswrapper[5039]: I1124 15:18:32.372619 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-tctwl" podUID="4179b75e-befe-4e91-97d8-3f4fa5e9628e" containerName="registry-server" containerID="cri-o://c3e62ef071581329751726008c04f0b7b6ef99c40ec9254ccd8e9eefc369bd84" gracePeriod=2 Nov 24 15:18:33 crc kubenswrapper[5039]: I1124 15:18:33.393984 5039 generic.go:334] "Generic (PLEG): container finished" podID="4179b75e-befe-4e91-97d8-3f4fa5e9628e" containerID="c3e62ef071581329751726008c04f0b7b6ef99c40ec9254ccd8e9eefc369bd84" exitCode=0 Nov 24 15:18:33 crc kubenswrapper[5039]: I1124 15:18:33.394250 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tctwl" event={"ID":"4179b75e-befe-4e91-97d8-3f4fa5e9628e","Type":"ContainerDied","Data":"c3e62ef071581329751726008c04f0b7b6ef99c40ec9254ccd8e9eefc369bd84"} Nov 24 15:18:33 crc kubenswrapper[5039]: I1124 15:18:33.394305 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tctwl" event={"ID":"4179b75e-befe-4e91-97d8-3f4fa5e9628e","Type":"ContainerDied","Data":"7a7020077222d0d5860534771ab7e95c5026f56f4c972262e2fbc346ccee7f3d"} Nov 24 15:18:33 crc kubenswrapper[5039]: I1124 15:18:33.394404 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7a7020077222d0d5860534771ab7e95c5026f56f4c972262e2fbc346ccee7f3d" Nov 24 15:18:33 crc kubenswrapper[5039]: I1124 15:18:33.400484 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hsdg" event={"ID":"71436ae2-8c4f-46bd-b877-b93ab84dbdac","Type":"ContainerStarted","Data":"1dba26704a7b0ea5e4346601a2138b42c879c8c573ccd647eb6484140ad2d3a5"} Nov 24 15:18:33 crc kubenswrapper[5039]: I1124 15:18:33.422415 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2hsdg" podStartSLOduration=2.665272852 podStartE2EDuration="13.422399927s" podCreationTimestamp="2025-11-24 15:18:20 +0000 UTC" firstStartedPulling="2025-11-24 15:18:22.203134939 +0000 UTC m=+7214.642259439" lastFinishedPulling="2025-11-24 15:18:32.960262014 +0000 UTC m=+7225.399386514" observedRunningTime="2025-11-24 15:18:33.417952989 +0000 UTC m=+7225.857077489" watchObservedRunningTime="2025-11-24 15:18:33.422399927 +0000 UTC m=+7225.861524427" Nov 24 15:18:33 crc kubenswrapper[5039]: I1124 15:18:33.440022 5039 util.go:48] "No ready sandbox for pod 
can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tctwl"
Nov 24 15:18:33 crc kubenswrapper[5039]: I1124 15:18:33.486531 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xnm6q\" (UniqueName: \"kubernetes.io/projected/4179b75e-befe-4e91-97d8-3f4fa5e9628e-kube-api-access-xnm6q\") pod \"4179b75e-befe-4e91-97d8-3f4fa5e9628e\" (UID: \"4179b75e-befe-4e91-97d8-3f4fa5e9628e\") "
Nov 24 15:18:33 crc kubenswrapper[5039]: I1124 15:18:33.486599 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4179b75e-befe-4e91-97d8-3f4fa5e9628e-catalog-content\") pod \"4179b75e-befe-4e91-97d8-3f4fa5e9628e\" (UID: \"4179b75e-befe-4e91-97d8-3f4fa5e9628e\") "
Nov 24 15:18:33 crc kubenswrapper[5039]: I1124 15:18:33.486743 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4179b75e-befe-4e91-97d8-3f4fa5e9628e-utilities\") pod \"4179b75e-befe-4e91-97d8-3f4fa5e9628e\" (UID: \"4179b75e-befe-4e91-97d8-3f4fa5e9628e\") "
Nov 24 15:18:33 crc kubenswrapper[5039]: I1124 15:18:33.487523 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4179b75e-befe-4e91-97d8-3f4fa5e9628e-utilities" (OuterVolumeSpecName: "utilities") pod "4179b75e-befe-4e91-97d8-3f4fa5e9628e" (UID: "4179b75e-befe-4e91-97d8-3f4fa5e9628e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 15:18:33 crc kubenswrapper[5039]: I1124 15:18:33.502922 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4179b75e-befe-4e91-97d8-3f4fa5e9628e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4179b75e-befe-4e91-97d8-3f4fa5e9628e" (UID: "4179b75e-befe-4e91-97d8-3f4fa5e9628e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 15:18:33 crc kubenswrapper[5039]: I1124 15:18:33.506718 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4179b75e-befe-4e91-97d8-3f4fa5e9628e-kube-api-access-xnm6q" (OuterVolumeSpecName: "kube-api-access-xnm6q") pod "4179b75e-befe-4e91-97d8-3f4fa5e9628e" (UID: "4179b75e-befe-4e91-97d8-3f4fa5e9628e"). InnerVolumeSpecName "kube-api-access-xnm6q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 15:18:33 crc kubenswrapper[5039]: I1124 15:18:33.589258 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4179b75e-befe-4e91-97d8-3f4fa5e9628e-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 15:18:33 crc kubenswrapper[5039]: I1124 15:18:33.589303 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xnm6q\" (UniqueName: \"kubernetes.io/projected/4179b75e-befe-4e91-97d8-3f4fa5e9628e-kube-api-access-xnm6q\") on node \"crc\" DevicePath \"\""
Nov 24 15:18:33 crc kubenswrapper[5039]: I1124 15:18:33.589317 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4179b75e-befe-4e91-97d8-3f4fa5e9628e-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 15:18:34 crc kubenswrapper[5039]: I1124 15:18:34.409326 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tctwl"
Nov 24 15:18:34 crc kubenswrapper[5039]: I1124 15:18:34.432649 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tctwl"]
Nov 24 15:18:34 crc kubenswrapper[5039]: I1124 15:18:34.447622 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-tctwl"]
Nov 24 15:18:35 crc kubenswrapper[5039]: I1124 15:18:35.306856 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3"
Nov 24 15:18:35 crc kubenswrapper[5039]: E1124 15:18:35.307199 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 15:18:36 crc kubenswrapper[5039]: I1124 15:18:36.320086 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4179b75e-befe-4e91-97d8-3f4fa5e9628e" path="/var/lib/kubelet/pods/4179b75e-befe-4e91-97d8-3f4fa5e9628e/volumes"
Nov 24 15:18:40 crc kubenswrapper[5039]: I1124 15:18:40.878558 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2hsdg"
Nov 24 15:18:40 crc kubenswrapper[5039]: I1124 15:18:40.879008 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2hsdg"
Nov 24 15:18:40 crc kubenswrapper[5039]: I1124 15:18:40.938456 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2hsdg"
Nov 24 15:18:41 crc kubenswrapper[5039]: I1124 15:18:41.547174 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2hsdg"
Nov 24 15:18:41 crc kubenswrapper[5039]: I1124 15:18:41.627368 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2hsdg"]
Nov 24 15:18:41 crc kubenswrapper[5039]: I1124 15:18:41.679220 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nqqfb"]
Nov 24 15:18:41 crc kubenswrapper[5039]: I1124 15:18:41.679471 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-nqqfb" podUID="4b19d8ea-3c77-43f4-b236-9d5ee2edbafd" containerName="registry-server" containerID="cri-o://91c98b76ca53b44d70a15558f35ad1fdc1202198d17aa30c890ebbf5f24df483" gracePeriod=2
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.209126 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nqqfb"
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.273094 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d9m7p\" (UniqueName: \"kubernetes.io/projected/4b19d8ea-3c77-43f4-b236-9d5ee2edbafd-kube-api-access-d9m7p\") pod \"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd\" (UID: \"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd\") "
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.273221 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b19d8ea-3c77-43f4-b236-9d5ee2edbafd-catalog-content\") pod \"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd\" (UID: \"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd\") "
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.273271 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b19d8ea-3c77-43f4-b236-9d5ee2edbafd-utilities\") pod \"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd\" (UID: \"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd\") "
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.281487 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b19d8ea-3c77-43f4-b236-9d5ee2edbafd-kube-api-access-d9m7p" (OuterVolumeSpecName: "kube-api-access-d9m7p") pod "4b19d8ea-3c77-43f4-b236-9d5ee2edbafd" (UID: "4b19d8ea-3c77-43f4-b236-9d5ee2edbafd"). InnerVolumeSpecName "kube-api-access-d9m7p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.288120 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b19d8ea-3c77-43f4-b236-9d5ee2edbafd-utilities" (OuterVolumeSpecName: "utilities") pod "4b19d8ea-3c77-43f4-b236-9d5ee2edbafd" (UID: "4b19d8ea-3c77-43f4-b236-9d5ee2edbafd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.377358 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b19d8ea-3c77-43f4-b236-9d5ee2edbafd-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.377397 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d9m7p\" (UniqueName: \"kubernetes.io/projected/4b19d8ea-3c77-43f4-b236-9d5ee2edbafd-kube-api-access-d9m7p\") on node \"crc\" DevicePath \"\""
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.400496 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b19d8ea-3c77-43f4-b236-9d5ee2edbafd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4b19d8ea-3c77-43f4-b236-9d5ee2edbafd" (UID: "4b19d8ea-3c77-43f4-b236-9d5ee2edbafd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.479216 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b19d8ea-3c77-43f4-b236-9d5ee2edbafd-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.490394 5039 generic.go:334] "Generic (PLEG): container finished" podID="4b19d8ea-3c77-43f4-b236-9d5ee2edbafd" containerID="91c98b76ca53b44d70a15558f35ad1fdc1202198d17aa30c890ebbf5f24df483" exitCode=0
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.490459 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nqqfb"
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.490494 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nqqfb" event={"ID":"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd","Type":"ContainerDied","Data":"91c98b76ca53b44d70a15558f35ad1fdc1202198d17aa30c890ebbf5f24df483"}
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.490568 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nqqfb" event={"ID":"4b19d8ea-3c77-43f4-b236-9d5ee2edbafd","Type":"ContainerDied","Data":"5417fa8cd7d0447347030496184abe980c3baf292fbb883dfabb943cf842e288"}
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.490586 5039 scope.go:117] "RemoveContainer" containerID="91c98b76ca53b44d70a15558f35ad1fdc1202198d17aa30c890ebbf5f24df483"
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.516833 5039 scope.go:117] "RemoveContainer" containerID="e201757b3b9da69b7228bec117d5b894806859b43835cab08b39aab75df736f5"
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.527563 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nqqfb"]
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.533573 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-nqqfb"]
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.534794 5039 scope.go:117] "RemoveContainer" containerID="ecae051b4a6c33352fbbcb9aec28c65d69a6321527e6c50425106ddfe6bfda08"
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.589113 5039 scope.go:117] "RemoveContainer" containerID="91c98b76ca53b44d70a15558f35ad1fdc1202198d17aa30c890ebbf5f24df483"
Nov 24 15:18:42 crc kubenswrapper[5039]: E1124 15:18:42.589547 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91c98b76ca53b44d70a15558f35ad1fdc1202198d17aa30c890ebbf5f24df483\": container with ID starting with 91c98b76ca53b44d70a15558f35ad1fdc1202198d17aa30c890ebbf5f24df483 not found: ID does not exist" containerID="91c98b76ca53b44d70a15558f35ad1fdc1202198d17aa30c890ebbf5f24df483"
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.589590 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91c98b76ca53b44d70a15558f35ad1fdc1202198d17aa30c890ebbf5f24df483"} err="failed to get container status \"91c98b76ca53b44d70a15558f35ad1fdc1202198d17aa30c890ebbf5f24df483\": rpc error: code = NotFound desc = could not find container \"91c98b76ca53b44d70a15558f35ad1fdc1202198d17aa30c890ebbf5f24df483\": container with ID starting with 91c98b76ca53b44d70a15558f35ad1fdc1202198d17aa30c890ebbf5f24df483 not found: ID does not exist"
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.589617 5039 scope.go:117] "RemoveContainer" containerID="e201757b3b9da69b7228bec117d5b894806859b43835cab08b39aab75df736f5"
Nov 24 15:18:42 crc kubenswrapper[5039]: E1124 15:18:42.589951 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e201757b3b9da69b7228bec117d5b894806859b43835cab08b39aab75df736f5\": container with ID starting with e201757b3b9da69b7228bec117d5b894806859b43835cab08b39aab75df736f5 not found: ID does not exist" containerID="e201757b3b9da69b7228bec117d5b894806859b43835cab08b39aab75df736f5"
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.589997 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e201757b3b9da69b7228bec117d5b894806859b43835cab08b39aab75df736f5"} err="failed to get container status \"e201757b3b9da69b7228bec117d5b894806859b43835cab08b39aab75df736f5\": rpc error: code = NotFound desc = could not find container \"e201757b3b9da69b7228bec117d5b894806859b43835cab08b39aab75df736f5\": container with ID starting with e201757b3b9da69b7228bec117d5b894806859b43835cab08b39aab75df736f5 not found: ID does not exist"
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.590031 5039 scope.go:117] "RemoveContainer" containerID="ecae051b4a6c33352fbbcb9aec28c65d69a6321527e6c50425106ddfe6bfda08"
Nov 24 15:18:42 crc kubenswrapper[5039]: E1124 15:18:42.590345 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ecae051b4a6c33352fbbcb9aec28c65d69a6321527e6c50425106ddfe6bfda08\": container with ID starting with ecae051b4a6c33352fbbcb9aec28c65d69a6321527e6c50425106ddfe6bfda08 not found: ID does not exist" containerID="ecae051b4a6c33352fbbcb9aec28c65d69a6321527e6c50425106ddfe6bfda08"
Nov 24 15:18:42 crc kubenswrapper[5039]: I1124 15:18:42.590381 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecae051b4a6c33352fbbcb9aec28c65d69a6321527e6c50425106ddfe6bfda08"} err="failed to get container status \"ecae051b4a6c33352fbbcb9aec28c65d69a6321527e6c50425106ddfe6bfda08\": rpc error: code = NotFound desc = could not find container \"ecae051b4a6c33352fbbcb9aec28c65d69a6321527e6c50425106ddfe6bfda08\": container with ID starting with ecae051b4a6c33352fbbcb9aec28c65d69a6321527e6c50425106ddfe6bfda08 not found: ID does not exist"
Nov 24 15:18:44 crc kubenswrapper[5039]: I1124 15:18:44.320520 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b19d8ea-3c77-43f4-b236-9d5ee2edbafd" path="/var/lib/kubelet/pods/4b19d8ea-3c77-43f4-b236-9d5ee2edbafd/volumes"
Nov 24 15:18:46 crc kubenswrapper[5039]: I1124 15:18:46.306889 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3"
Nov 24 15:18:46 crc kubenswrapper[5039]: E1124 15:18:46.307592 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 15:18:49 crc kubenswrapper[5039]: I1124 15:18:49.003787 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-4qp6m_1988c73c-a04d-4b50-af92-54dfc2a4a262/control-plane-machine-set-operator/0.log"
Nov 24 15:18:49 crc kubenswrapper[5039]: I1124 15:18:49.138812 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-n2dwh_ae5ca663-7edb-49dd-a7a7-668eeace13f7/kube-rbac-proxy/0.log"
Nov 24 15:18:49 crc kubenswrapper[5039]: I1124 15:18:49.160542 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-n2dwh_ae5ca663-7edb-49dd-a7a7-668eeace13f7/machine-api-operator/0.log"
Nov 24 15:18:58 crc kubenswrapper[5039]: I1124 15:18:58.306904 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3"
Nov 24 15:18:58 crc kubenswrapper[5039]: E1124 15:18:58.307736 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 15:19:02 crc kubenswrapper[5039]: I1124 15:19:02.105659 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-8kpc5_ab33654d-a27e-4922-87c3-37d387a8dfa6/cert-manager-controller/0.log"
Nov 24 15:19:02 crc kubenswrapper[5039]: I1124 15:19:02.183896 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-dwbfg_e8bfdf0d-df1c-4dda-8c3d-8113eee0ad4a/cert-manager-cainjector/0.log"
Nov 24 15:19:02 crc kubenswrapper[5039]: I1124 15:19:02.279837 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-9rmz6_2526d128-0579-4f6f-9327-12ac7fe30e96/cert-manager-webhook/0.log"
Nov 24 15:19:10 crc kubenswrapper[5039]: I1124 15:19:10.308374 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3"
Nov 24 15:19:10 crc kubenswrapper[5039]: E1124 15:19:10.309027 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 15:19:15 crc kubenswrapper[5039]: I1124 15:19:15.916962 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-9dqz9_1eee0206-49ea-45f5-8c34-547075ba3c65/nmstate-console-plugin/0.log"
Nov 24 15:19:16 crc kubenswrapper[5039]: I1124 15:19:16.096073 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-ptxfm_e0e36cbb-009a-4784-a04d-95badbce22d0/nmstate-handler/0.log"
Nov 24 15:19:16 crc kubenswrapper[5039]: I1124 15:19:16.147615 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-fnjvc_f006222b-71be-4a99-9b20-e048040bd042/kube-rbac-proxy/0.log"
Nov 24 15:19:16 crc kubenswrapper[5039]: I1124 15:19:16.174307 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-fnjvc_f006222b-71be-4a99-9b20-e048040bd042/nmstate-metrics/0.log"
Nov 24 15:19:16 crc kubenswrapper[5039]: I1124 15:19:16.310265 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-55t2b_a78868c4-aedd-4fe3-a055-5460cac9f6c4/nmstate-operator/0.log"
Nov 24 15:19:16 crc kubenswrapper[5039]: I1124 15:19:16.393647 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-j8g7c_667e819e-83fc-453d-90d3-7b89b63e15a4/nmstate-webhook/0.log"
Nov 24 15:19:25 crc kubenswrapper[5039]: I1124 15:19:25.308044 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3"
Nov 24 15:19:25 crc kubenswrapper[5039]: E1124 15:19:25.310417 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 15:19:29 crc kubenswrapper[5039]: I1124 15:19:29.676575 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-dfbf69d45-vngzb_e97f0fac-4f42-4ea9-b853-33c7aedeba68/manager/0.log"
Nov 24 15:19:29 crc kubenswrapper[5039]: I1124 15:19:29.709033 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-dfbf69d45-vngzb_e97f0fac-4f42-4ea9-b853-33c7aedeba68/kube-rbac-proxy/0.log"
Nov 24 15:19:39 crc kubenswrapper[5039]: I1124 15:19:39.306883 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3"
Nov 24 15:19:39 crc kubenswrapper[5039]: E1124 15:19:39.307601 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 15:19:45 crc kubenswrapper[5039]: I1124 15:19:45.200115 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_cluster-logging-operator-ff9846bd-8d4cv_ecb03566-7ffa-42ab-aa02-22bad9858b86/cluster-logging-operator/0.log"
Nov 24 15:19:45 crc kubenswrapper[5039]: I1124 15:19:45.422739 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_collector-cqptg_a477b3d9-ef5d-4254-bc37-62f62a3ac851/collector/0.log"
Nov 24 15:19:45 crc kubenswrapper[5039]: I1124 15:19:45.507929 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-compactor-0_a1e6b0b7-32a0-465a-a329-d060dbf0b8f9/loki-compactor/0.log"
Nov 24 15:19:45 crc kubenswrapper[5039]: I1124 15:19:45.605027 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-distributor-76cc67bf56-xwwg7_37a01398-aa18-423a-8fa0-b3d1f5fe0cfd/loki-distributor/0.log"
Nov 24 15:19:45 crc kubenswrapper[5039]: I1124 15:19:45.737605 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-797bc7dfc5-fjsvj_f025c7ee-097c-4915-9946-41b57f995f0d/gateway/0.log"
Nov 24 15:19:45 crc kubenswrapper[5039]: I1124 15:19:45.797468 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-797bc7dfc5-fjsvj_f025c7ee-097c-4915-9946-41b57f995f0d/opa/0.log"
Nov 24 15:19:45 crc kubenswrapper[5039]: I1124 15:19:45.898757 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-797bc7dfc5-zd8bg_c24389d6-c229-4c2b-9933-61cd5f9b81d3/gateway/0.log"
Nov 24 15:19:45 crc kubenswrapper[5039]: I1124 15:19:45.947058 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-797bc7dfc5-zd8bg_c24389d6-c229-4c2b-9933-61cd5f9b81d3/opa/0.log"
Nov 24 15:19:46 crc kubenswrapper[5039]: I1124 15:19:46.116975 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-index-gateway-0_771e7704-d64a-4536-adfb-2ca0e6356956/loki-index-gateway/0.log"
Nov 24 15:19:46 crc kubenswrapper[5039]: I1124 15:19:46.188486 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-ingester-0_ffdab614-73c1-4ac9-adba-d2ec7ce81550/loki-ingester/0.log"
Nov 24 15:19:46 crc kubenswrapper[5039]: I1124 15:19:46.319874 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-querier-5895d59bb8-4xrbf_f3453a5d-07da-4391-a2d5-df5154962b61/loki-querier/0.log"
Nov 24 15:19:46 crc kubenswrapper[5039]: I1124 15:19:46.409568 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-query-frontend-84558f7c9f-qvz9s_0c93d652-05e2-4359-b5f6-6951e26ba7d2/loki-query-frontend/0.log"
Nov 24 15:19:54 crc kubenswrapper[5039]: I1124 15:19:54.307433 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3"
Nov 24 15:19:54 crc kubenswrapper[5039]: E1124 15:19:54.308446 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 15:20:00 crc kubenswrapper[5039]: I1124 15:20:00.863894 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-m58j2_3e29f306-3558-4854-9ada-3ff94d2ad700/kube-rbac-proxy/0.log"
Nov 24 15:20:00 crc kubenswrapper[5039]: I1124 15:20:00.993332 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-m58j2_3e29f306-3558-4854-9ada-3ff94d2ad700/controller/0.log"
Nov 24 15:20:01 crc kubenswrapper[5039]: I1124 15:20:01.104999 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-frr-files/0.log"
Nov 24 15:20:01 crc kubenswrapper[5039]: I1124 15:20:01.225056 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-frr-files/0.log"
Nov 24 15:20:01 crc kubenswrapper[5039]: I1124 15:20:01.255140 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-reloader/0.log"
Nov 24 15:20:01 crc kubenswrapper[5039]: I1124 15:20:01.295837 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-metrics/0.log"
Nov 24 15:20:01 crc kubenswrapper[5039]: I1124 15:20:01.297190 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-reloader/0.log"
Nov 24 15:20:01 crc kubenswrapper[5039]: I1124 15:20:01.499668 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-frr-files/0.log"
Nov 24 15:20:01 crc kubenswrapper[5039]: I1124 15:20:01.523628 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-reloader/0.log"
Nov 24 15:20:01 crc kubenswrapper[5039]: I1124 15:20:01.524184 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-metrics/0.log"
Nov 24 15:20:01 crc kubenswrapper[5039]: I1124 15:20:01.553154 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-metrics/0.log"
Nov 24 15:20:01 crc kubenswrapper[5039]: I1124 15:20:01.757282 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-reloader/0.log"
Nov 24 15:20:01 crc kubenswrapper[5039]: I1124 15:20:01.773198 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-metrics/0.log"
Nov 24 15:20:01 crc kubenswrapper[5039]: I1124 15:20:01.775802 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-frr-files/0.log"
Nov 24 15:20:01 crc kubenswrapper[5039]: I1124 15:20:01.790392 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/controller/0.log"
Nov 24 15:20:01 crc kubenswrapper[5039]: I1124 15:20:01.964815 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/kube-rbac-proxy/0.log"
Nov 24 15:20:01 crc kubenswrapper[5039]: I1124 15:20:01.991553 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/frr-metrics/0.log"
Nov 24 15:20:02 crc kubenswrapper[5039]: I1124 15:20:02.007110 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/kube-rbac-proxy-frr/0.log"
Nov 24 15:20:02 crc kubenswrapper[5039]: I1124 15:20:02.162271 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/reloader/0.log"
Nov 24 15:20:02 crc kubenswrapper[5039]: I1124 15:20:02.210294 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-kn25p_d341f082-ff80-43f0-aa5c-1476f8addb05/frr-k8s-webhook-server/0.log"
Nov 24 15:20:02 crc kubenswrapper[5039]: I1124 15:20:02.468674 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-859686c6ff-hskrv_4af724f9-39c3-414e-a020-29da6a5bfac7/manager/0.log"
Nov 24 15:20:02 crc kubenswrapper[5039]: I1124 15:20:02.743196 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-76f55458ff-26z8q_18cdfd31-117a-4b07-bdba-fc6703fcfa55/webhook-server/0.log"
Nov 24 15:20:02 crc kubenswrapper[5039]: I1124 15:20:02.748544 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-7wr9z_fa0ca8a9-96d3-40dc-916f-97048b7112b0/kube-rbac-proxy/0.log"
Nov 24 15:20:03 crc kubenswrapper[5039]: I1124 15:20:03.558980 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-7wr9z_fa0ca8a9-96d3-40dc-916f-97048b7112b0/speaker/0.log"
Nov 24 15:20:04 crc kubenswrapper[5039]: I1124 15:20:04.121152 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/frr/0.log"
Nov 24 15:20:05 crc kubenswrapper[5039]: I1124 15:20:05.307056 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3"
Nov 24 15:20:05 crc kubenswrapper[5039]: E1124 15:20:05.307666 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 15:20:16 crc kubenswrapper[5039]: I1124 15:20:16.934993 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz_d615d58f-8a19-4226-a022-26c3c2f46eaa/util/0.log"
Nov 24 15:20:17 crc kubenswrapper[5039]: I1124 15:20:17.118622 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz_d615d58f-8a19-4226-a022-26c3c2f46eaa/util/0.log"
Nov 24 15:20:17 crc kubenswrapper[5039]: I1124 15:20:17.126610 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz_d615d58f-8a19-4226-a022-26c3c2f46eaa/pull/0.log"
Nov 24 15:20:17 crc kubenswrapper[5039]: I1124 15:20:17.150466 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz_d615d58f-8a19-4226-a022-26c3c2f46eaa/pull/0.log"
Nov 24 15:20:17 crc kubenswrapper[5039]: I1124 15:20:17.306260 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz_d615d58f-8a19-4226-a022-26c3c2f46eaa/util/0.log"
Nov 24 15:20:17 crc kubenswrapper[5039]: I1124 15:20:17.358185 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz_d615d58f-8a19-4226-a022-26c3c2f46eaa/pull/0.log"
Nov 24 15:20:17 crc kubenswrapper[5039]: I1124 15:20:17.378285 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz_d615d58f-8a19-4226-a022-26c3c2f46eaa/extract/0.log"
Nov 24 15:20:17 crc kubenswrapper[5039]: I1124 15:20:17.481928 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb_175053d9-6995-4edc-9e0b-a72a0e10ae72/util/0.log"
Nov 24 15:20:17 crc kubenswrapper[5039]: I1124 15:20:17.644248 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb_175053d9-6995-4edc-9e0b-a72a0e10ae72/pull/0.log"
Nov 24 15:20:17 crc kubenswrapper[5039]: I1124 15:20:17.657330 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb_175053d9-6995-4edc-9e0b-a72a0e10ae72/pull/0.log"
Nov 24 15:20:17 crc kubenswrapper[5039]: I1124 15:20:17.671550 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb_175053d9-6995-4edc-9e0b-a72a0e10ae72/util/0.log"
Nov 24 15:20:17 crc kubenswrapper[5039]: I1124 15:20:17.806874 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb_175053d9-6995-4edc-9e0b-a72a0e10ae72/util/0.log"
Nov 24 15:20:17 crc kubenswrapper[5039]: I1124 15:20:17.816716 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb_175053d9-6995-4edc-9e0b-a72a0e10ae72/pull/0.log"
Nov 24 15:20:17 crc kubenswrapper[5039]: I1124 15:20:17.826653 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb_175053d9-6995-4edc-9e0b-a72a0e10ae72/extract/0.log"
Nov 24 15:20:17 crc kubenswrapper[5039]: I1124 15:20:17.999337 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b_f60bd1ab-ddc1-462f-85f9-e47d7305727d/util/0.log"
Nov 24 15:20:18 crc kubenswrapper[5039]: I1124 15:20:18.184201 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b_f60bd1ab-ddc1-462f-85f9-e47d7305727d/util/0.log"
Nov 24 15:20:18 crc kubenswrapper[5039]: I1124 15:20:18.184402 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b_f60bd1ab-ddc1-462f-85f9-e47d7305727d/pull/0.log"
Nov 24 15:20:18 crc kubenswrapper[5039]: I1124 15:20:18.198467 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b_f60bd1ab-ddc1-462f-85f9-e47d7305727d/pull/0.log"
Nov 24 15:20:18 crc kubenswrapper[5039]: I1124 15:20:18.354538 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b_f60bd1ab-ddc1-462f-85f9-e47d7305727d/util/0.log"
Nov 24 15:20:18 crc kubenswrapper[5039]: I1124 15:20:18.384012 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b_f60bd1ab-ddc1-462f-85f9-e47d7305727d/extract/0.log"
Nov 24 15:20:18 crc kubenswrapper[5039]: I1124 15:20:18.392626 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b_f60bd1ab-ddc1-462f-85f9-e47d7305727d/pull/0.log"
Nov 24 15:20:18 crc kubenswrapper[5039]: I1124 15:20:18.529642 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6_8153476f-9f52-4a9b-9976-f71664f6f667/util/0.log"
Nov 24 15:20:18 crc kubenswrapper[5039]: I1124 15:20:18.717823 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6_8153476f-9f52-4a9b-9976-f71664f6f667/util/0.log"
Nov 24 15:20:18 crc kubenswrapper[5039]: I1124 15:20:18.718142 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6_8153476f-9f52-4a9b-9976-f71664f6f667/pull/0.log"
Nov 24 15:20:18 crc kubenswrapper[5039]: I1124 15:20:18.734703 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6_8153476f-9f52-4a9b-9976-f71664f6f667/pull/0.log"
Nov 24 15:20:18 crc kubenswrapper[5039]: I1124 15:20:18.887792 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6_8153476f-9f52-4a9b-9976-f71664f6f667/util/0.log"
Nov 24 15:20:18 crc kubenswrapper[5039]: I1124 15:20:18.898403 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6_8153476f-9f52-4a9b-9976-f71664f6f667/pull/0.log"
Nov 24 15:20:18 crc kubenswrapper[5039]: I1124 15:20:18.901451 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6_8153476f-9f52-4a9b-9976-f71664f6f667/extract/0.log"
Nov 24 15:20:19 crc kubenswrapper[5039]: I1124 15:20:19.035151 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2hsdg_71436ae2-8c4f-46bd-b877-b93ab84dbdac/extract-utilities/0.log"
Nov 24 15:20:19 crc kubenswrapper[5039]: I1124 15:20:19.218576 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2hsdg_71436ae2-8c4f-46bd-b877-b93ab84dbdac/extract-content/0.log"
Nov 24 15:20:19 crc kubenswrapper[5039]: I1124 15:20:19.252946 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2hsdg_71436ae2-8c4f-46bd-b877-b93ab84dbdac/extract-utilities/0.log"
Nov 24 15:20:19 crc kubenswrapper[5039]: I1124 15:20:19.291706 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2hsdg_71436ae2-8c4f-46bd-b877-b93ab84dbdac/extract-content/0.log"
Nov 24 15:20:19 crc kubenswrapper[5039]: I1124 15:20:19.306675 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3"
Nov 24 15:20:19 crc kubenswrapper[5039]: E1124 15:20:19.307001 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60"
Nov 24 15:20:19 crc kubenswrapper[5039]: I1124 15:20:19.425771 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2hsdg_71436ae2-8c4f-46bd-b877-b93ab84dbdac/extract-utilities/0.log"
Nov 24 15:20:19 crc kubenswrapper[5039]: I1124 15:20:19.545569 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2hsdg_71436ae2-8c4f-46bd-b877-b93ab84dbdac/extract-content/0.log"
Nov 24 15:20:19 crc kubenswrapper[5039]: I1124 15:20:19.573537 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2hsdg_71436ae2-8c4f-46bd-b877-b93ab84dbdac/registry-server/0.log"
Nov 24 15:20:19 crc kubenswrapper[5039]: I1124 15:20:19.635686 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dlptg_592272ed-6a8c-42d9-8c87-b62ba335267c/extract-utilities/0.log"
Nov 24 15:20:19 crc kubenswrapper[5039]: I1124 15:20:19.846695 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dlptg_592272ed-6a8c-42d9-8c87-b62ba335267c/extract-content/0.log"
Nov 24 15:20:19 crc kubenswrapper[5039]: I1124 15:20:19.858710 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dlptg_592272ed-6a8c-42d9-8c87-b62ba335267c/extract-content/0.log"
Nov 24 15:20:19 crc kubenswrapper[5039]: I1124 15:20:19.862156 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dlptg_592272ed-6a8c-42d9-8c87-b62ba335267c/extract-utilities/0.log"
Nov 24 15:20:20 crc kubenswrapper[5039]: I1124 15:20:20.014929 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dlptg_592272ed-6a8c-42d9-8c87-b62ba335267c/extract-utilities/0.log"
Nov 24 15:20:20 crc kubenswrapper[5039]: I1124 15:20:20.057936 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dlptg_592272ed-6a8c-42d9-8c87-b62ba335267c/extract-content/0.log"
Nov 24 15:20:20 crc kubenswrapper[5039]: I1124 15:20:20.295408 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk_8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130/util/0.log"
Nov 24 15:20:20 crc kubenswrapper[5039]: I1124 15:20:20.401433 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk_8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130/pull/0.log"
Nov 24 15:20:20 crc kubenswrapper[5039]: I1124 15:20:20.443732 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dlptg_592272ed-6a8c-42d9-8c87-b62ba335267c/registry-server/0.log"
Nov 24 15:20:20 crc kubenswrapper[5039]: I1124 15:20:20.448338 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk_8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130/util/0.log"
Nov 24 15:20:20 crc kubenswrapper[5039]: I1124 15:20:20.455489 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk_8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130/pull/0.log"
Nov 24 15:20:20 crc kubenswrapper[5039]: I1124 15:20:20.604114 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk_8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130/util/0.log"
Nov 24 15:20:20 crc kubenswrapper[5039]: I1124 15:20:20.621781 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk_8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130/pull/0.log"
Nov 24 15:20:20 crc kubenswrapper[5039]: I1124 15:20:20.639182 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk_8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130/extract/0.log"
Nov 24 15:20:20 crc kubenswrapper[5039]: I1124 15:20:20.700613 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-zhpf5_c0bf8d9e-d6fb-400d-8fa2-d547a9a64107/marketplace-operator/0.log"
Nov 24 15:20:20 crc kubenswrapper[5039]: I1124 15:20:20.809794 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-c92b4_91b2d0f6-ea36-4860-aa8a-2645a1a44741/extract-utilities/0.log"
Nov 24 15:20:21 crc kubenswrapper[5039]: I1124 15:20:21.003306 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-c92b4_91b2d0f6-ea36-4860-aa8a-2645a1a44741/extract-content/0.log"
Nov 24 15:20:21 crc kubenswrapper[5039]: I1124 15:20:21.010484 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-c92b4_91b2d0f6-ea36-4860-aa8a-2645a1a44741/extract-utilities/0.log"
Nov 24 15:20:21 crc kubenswrapper[5039]: I1124 15:20:21.024805 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-c92b4_91b2d0f6-ea36-4860-aa8a-2645a1a44741/extract-content/0.log"
Nov 24 15:20:21 crc kubenswrapper[5039]: I1124 15:20:21.202227 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-c92b4_91b2d0f6-ea36-4860-aa8a-2645a1a44741/extract-content/0.log"
Nov 24 15:20:21 crc kubenswrapper[5039]: I1124 15:20:21.213320 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-c92b4_91b2d0f6-ea36-4860-aa8a-2645a1a44741/extract-utilities/0.log"
Nov 24 15:20:21 crc kubenswrapper[5039]: I1124 15:20:21.242606 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bkhz2_36fbfba2-65ab-44a5-9e69-0f6c67426f55/extract-utilities/0.log"
Nov 24 15:20:21 crc kubenswrapper[5039]: I1124 15:20:21.423513 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bkhz2_36fbfba2-65ab-44a5-9e69-0f6c67426f55/extract-content/0.log"
Nov 24 15:20:21 crc kubenswrapper[5039]: I1124 15:20:21.436426 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bkhz2_36fbfba2-65ab-44a5-9e69-0f6c67426f55/extract-utilities/0.log"
Nov 24 15:20:21 crc kubenswrapper[5039]: I1124 15:20:21.496258 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-c92b4_91b2d0f6-ea36-4860-aa8a-2645a1a44741/registry-server/0.log"
Nov 24 15:20:21 crc kubenswrapper[5039]: I1124 15:20:21.517133 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bkhz2_36fbfba2-65ab-44a5-9e69-0f6c67426f55/extract-content/0.log"
Nov 24 15:20:21 crc kubenswrapper[5039]: I1124 15:20:21.649483 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bkhz2_36fbfba2-65ab-44a5-9e69-0f6c67426f55/extract-utilities/0.log"
Nov 24 15:20:21 crc kubenswrapper[5039]: I1124 15:20:21.660094 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bkhz2_36fbfba2-65ab-44a5-9e69-0f6c67426f55/extract-content/0.log"
Nov 24 15:20:22 crc kubenswrapper[5039]: I1124 15:20:22.148909 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bkhz2_36fbfba2-65ab-44a5-9e69-0f6c67426f55/registry-server/0.log"
Nov 24 15:20:33 crc kubenswrapper[5039]: I1124 15:20:33.337466 5039 scope.go:117] "RemoveContainer" containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3"
Nov 24 15:20:33 crc kubenswrapper[5039]: I1124 15:20:33.810586 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"c63b161b15ffbf31f7bd32b6256206164578c32ffa789a8c58aea4157b4a1605"}
Nov 24 15:20:35 crc kubenswrapper[5039]: I1124 15:20:35.211648 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-dnvjn_21190c12-076c-4263-a68d-6dc4117e1d10/prometheus-operator/0.log"
Nov 24 15:20:35 crc kubenswrapper[5039]: I1124 15:20:35.438996 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq_eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e/prometheus-operator-admission-webhook/0.log"
Nov 24 15:20:35 crc kubenswrapper[5039]: I1124 15:20:35.487645 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w_79c1d6a0-9ed7-48c8-8a09-e4695a89d953/prometheus-operator-admission-webhook/0.log"
Nov 24 15:20:35 crc kubenswrapper[5039]: I1124 15:20:35.717141 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-6t44d_dcdfb73e-765a-4fba-bdcb-0ca1cd215211/operator/0.log"
Nov 24 15:20:35 crc kubenswrapper[5039]: I1124 15:20:35.753756 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-7d5fb4cbfb-s2gqw_7d53a76d-409b-45a1-8000-d4f8f2b1ac18/observability-ui-dashboards/0.log"
Nov 24 15:20:35 crc kubenswrapper[5039]: I1124 15:20:35.882303 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-kkzn2_f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b/perses-operator/0.log"
Nov 24 15:20:51 crc kubenswrapper[5039]: I1124 15:20:51.334027 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-dfbf69d45-vngzb_e97f0fac-4f42-4ea9-b853-33c7aedeba68/manager/0.log"
Nov 24 15:20:51 crc kubenswrapper[5039]: I1124 15:20:51.350523 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-dfbf69d45-vngzb_e97f0fac-4f42-4ea9-b853-33c7aedeba68/kube-rbac-proxy/0.log"
Nov 24 15:22:01 crc kubenswrapper[5039]: I1124 15:22:01.205809 5039 scope.go:117] "RemoveContainer" containerID="b06ca7bfd51ae89a7fc61d0297c878da9e18b9b1359e0d2016f731077d15c981"
Nov 24 15:22:45 crc kubenswrapper[5039]: I1124 15:22:45.522142 5039 generic.go:334] "Generic (PLEG): container finished" podID="32832beb-344a-48ae-a105-25a29b2d4b1d" containerID="8e9a81e949051fabfc7cf5240ce64d395951d86a6d9c781bf8e2e059aa28549a" exitCode=0
Nov 24 15:22:45 crc kubenswrapper[5039]: I1124 15:22:45.522371 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-76clq/must-gather-qq2vm" event={"ID":"32832beb-344a-48ae-a105-25a29b2d4b1d","Type":"ContainerDied","Data":"8e9a81e949051fabfc7cf5240ce64d395951d86a6d9c781bf8e2e059aa28549a"}
Nov 24 15:22:45 crc kubenswrapper[5039]: I1124 15:22:45.523602 5039 scope.go:117] "RemoveContainer" containerID="8e9a81e949051fabfc7cf5240ce64d395951d86a6d9c781bf8e2e059aa28549a"
Nov 24 15:22:45 crc kubenswrapper[5039]: I1124 15:22:45.942754 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-76clq_must-gather-qq2vm_32832beb-344a-48ae-a105-25a29b2d4b1d/gather/0.log"
Nov 24 15:22:50 crc kubenswrapper[5039]: I1124 15:22:50.101348 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 15:22:50 crc kubenswrapper[5039]: I1124 15:22:50.104351 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 15:22:54 crc kubenswrapper[5039]: I1124 15:22:54.383160 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-76clq/must-gather-qq2vm"]
Nov 24 15:22:54 crc kubenswrapper[5039]: I1124 15:22:54.384403 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-76clq/must-gather-qq2vm" podUID="32832beb-344a-48ae-a105-25a29b2d4b1d" containerName="copy" containerID="cri-o://fc78bdeaec3ba62dd002284d818691b0aa8bf1b5c6a6d4c02154dcd2edcd6c78" gracePeriod=2
Nov 24 15:22:54 crc kubenswrapper[5039]: I1124 15:22:54.395075 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-76clq/must-gather-qq2vm"]
Nov 24 15:22:54 crc kubenswrapper[5039]: I1124 15:22:54.646275 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-76clq_must-gather-qq2vm_32832beb-344a-48ae-a105-25a29b2d4b1d/copy/0.log"
Nov 24 15:22:54 crc kubenswrapper[5039]: I1124 15:22:54.647490 5039 generic.go:334] "Generic (PLEG): container finished" podID="32832beb-344a-48ae-a105-25a29b2d4b1d" containerID="fc78bdeaec3ba62dd002284d818691b0aa8bf1b5c6a6d4c02154dcd2edcd6c78" exitCode=143
Nov 24 15:22:54 crc kubenswrapper[5039]: I1124 15:22:54.957422 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-76clq_must-gather-qq2vm_32832beb-344a-48ae-a105-25a29b2d4b1d/copy/0.log"
Nov 24 15:22:54 crc kubenswrapper[5039]: I1124 15:22:54.958347 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-76clq/must-gather-qq2vm"
Nov 24 15:22:55 crc kubenswrapper[5039]: I1124 15:22:55.129237 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/32832beb-344a-48ae-a105-25a29b2d4b1d-must-gather-output\") pod \"32832beb-344a-48ae-a105-25a29b2d4b1d\" (UID: \"32832beb-344a-48ae-a105-25a29b2d4b1d\") "
Nov 24 15:22:55 crc kubenswrapper[5039]: I1124 15:22:55.129484 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k24l7\" (UniqueName: \"kubernetes.io/projected/32832beb-344a-48ae-a105-25a29b2d4b1d-kube-api-access-k24l7\") pod \"32832beb-344a-48ae-a105-25a29b2d4b1d\" (UID: \"32832beb-344a-48ae-a105-25a29b2d4b1d\") "
Nov 24 15:22:55 crc kubenswrapper[5039]: I1124 15:22:55.139217 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32832beb-344a-48ae-a105-25a29b2d4b1d-kube-api-access-k24l7" (OuterVolumeSpecName: "kube-api-access-k24l7") pod "32832beb-344a-48ae-a105-25a29b2d4b1d" (UID: "32832beb-344a-48ae-a105-25a29b2d4b1d"). InnerVolumeSpecName "kube-api-access-k24l7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 15:22:55 crc kubenswrapper[5039]: I1124 15:22:55.232941 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k24l7\" (UniqueName: \"kubernetes.io/projected/32832beb-344a-48ae-a105-25a29b2d4b1d-kube-api-access-k24l7\") on node \"crc\" DevicePath \"\""
Nov 24 15:22:55 crc kubenswrapper[5039]: I1124 15:22:55.341227 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32832beb-344a-48ae-a105-25a29b2d4b1d-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "32832beb-344a-48ae-a105-25a29b2d4b1d" (UID: "32832beb-344a-48ae-a105-25a29b2d4b1d"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 15:22:55 crc kubenswrapper[5039]: I1124 15:22:55.438241 5039 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/32832beb-344a-48ae-a105-25a29b2d4b1d-must-gather-output\") on node \"crc\" DevicePath \"\""
Nov 24 15:22:55 crc kubenswrapper[5039]: I1124 15:22:55.660895 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-76clq_must-gather-qq2vm_32832beb-344a-48ae-a105-25a29b2d4b1d/copy/0.log"
Nov 24 15:22:55 crc kubenswrapper[5039]: I1124 15:22:55.661332 5039 scope.go:117] "RemoveContainer" containerID="fc78bdeaec3ba62dd002284d818691b0aa8bf1b5c6a6d4c02154dcd2edcd6c78"
Nov 24 15:22:55 crc kubenswrapper[5039]: I1124 15:22:55.661371 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-76clq/must-gather-qq2vm"
Nov 24 15:22:55 crc kubenswrapper[5039]: I1124 15:22:55.682167 5039 scope.go:117] "RemoveContainer" containerID="8e9a81e949051fabfc7cf5240ce64d395951d86a6d9c781bf8e2e059aa28549a"
Nov 24 15:22:56 crc kubenswrapper[5039]: I1124 15:22:56.320667 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32832beb-344a-48ae-a105-25a29b2d4b1d" path="/var/lib/kubelet/pods/32832beb-344a-48ae-a105-25a29b2d4b1d/volumes"
Nov 24 15:23:01 crc kubenswrapper[5039]: I1124 15:23:01.325403 5039 scope.go:117] "RemoveContainer" containerID="eee1d977deb0f22f4ced149bc1b038ef2001e44d86b2500817c2fa4d6330bd82"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.667735 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bql48"]
Nov 24 15:23:08 crc kubenswrapper[5039]: E1124 15:23:08.668594 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32832beb-344a-48ae-a105-25a29b2d4b1d" containerName="copy"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.668606 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="32832beb-344a-48ae-a105-25a29b2d4b1d" containerName="copy"
Nov 24 15:23:08 crc kubenswrapper[5039]: E1124 15:23:08.668617 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4179b75e-befe-4e91-97d8-3f4fa5e9628e" containerName="registry-server"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.668623 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="4179b75e-befe-4e91-97d8-3f4fa5e9628e" containerName="registry-server"
Nov 24 15:23:08 crc kubenswrapper[5039]: E1124 15:23:08.668638 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32832beb-344a-48ae-a105-25a29b2d4b1d" containerName="gather"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.668645 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="32832beb-344a-48ae-a105-25a29b2d4b1d" containerName="gather"
Nov 24 15:23:08 crc kubenswrapper[5039]: E1124 15:23:08.668674 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4179b75e-befe-4e91-97d8-3f4fa5e9628e" containerName="extract-content"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.668680 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="4179b75e-befe-4e91-97d8-3f4fa5e9628e" containerName="extract-content"
Nov 24 15:23:08 crc kubenswrapper[5039]: E1124 15:23:08.668694 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b19d8ea-3c77-43f4-b236-9d5ee2edbafd" containerName="extract-content"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.668700 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b19d8ea-3c77-43f4-b236-9d5ee2edbafd" containerName="extract-content"
Nov 24 15:23:08 crc kubenswrapper[5039]: E1124 15:23:08.668714 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b19d8ea-3c77-43f4-b236-9d5ee2edbafd" containerName="registry-server"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.668720 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b19d8ea-3c77-43f4-b236-9d5ee2edbafd" containerName="registry-server"
Nov 24 15:23:08 crc kubenswrapper[5039]: E1124 15:23:08.668738 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b19d8ea-3c77-43f4-b236-9d5ee2edbafd" containerName="extract-utilities"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.668744 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b19d8ea-3c77-43f4-b236-9d5ee2edbafd" containerName="extract-utilities"
Nov 24 15:23:08 crc kubenswrapper[5039]: E1124 15:23:08.668753 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4179b75e-befe-4e91-97d8-3f4fa5e9628e" containerName="extract-utilities"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.668758 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="4179b75e-befe-4e91-97d8-3f4fa5e9628e" containerName="extract-utilities"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.668947 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="32832beb-344a-48ae-a105-25a29b2d4b1d" containerName="copy"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.668964 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b19d8ea-3c77-43f4-b236-9d5ee2edbafd" containerName="registry-server"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.668976 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="32832beb-344a-48ae-a105-25a29b2d4b1d" containerName="gather"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.668994 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="4179b75e-befe-4e91-97d8-3f4fa5e9628e" containerName="registry-server"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.670695 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bql48"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.685547 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bql48"]
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.837473 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3ec6f18-f5f7-4fc3-ad28-8cd028459b52-catalog-content\") pod \"community-operators-bql48\" (UID: \"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52\") " pod="openshift-marketplace/community-operators-bql48"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.838146 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3ec6f18-f5f7-4fc3-ad28-8cd028459b52-utilities\") pod \"community-operators-bql48\" (UID: \"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52\") " pod="openshift-marketplace/community-operators-bql48"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.838264 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbtp5\" (UniqueName: \"kubernetes.io/projected/a3ec6f18-f5f7-4fc3-ad28-8cd028459b52-kube-api-access-qbtp5\") pod \"community-operators-bql48\" (UID: \"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52\") " pod="openshift-marketplace/community-operators-bql48"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.941104 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3ec6f18-f5f7-4fc3-ad28-8cd028459b52-catalog-content\") pod \"community-operators-bql48\" (UID: \"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52\") " pod="openshift-marketplace/community-operators-bql48"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.941219 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3ec6f18-f5f7-4fc3-ad28-8cd028459b52-utilities\") pod \"community-operators-bql48\" (UID: \"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52\") " pod="openshift-marketplace/community-operators-bql48"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.941256 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbtp5\" (UniqueName: \"kubernetes.io/projected/a3ec6f18-f5f7-4fc3-ad28-8cd028459b52-kube-api-access-qbtp5\") pod \"community-operators-bql48\" (UID: \"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52\") " pod="openshift-marketplace/community-operators-bql48"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.942014 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3ec6f18-f5f7-4fc3-ad28-8cd028459b52-catalog-content\") pod \"community-operators-bql48\" (UID: \"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52\") " pod="openshift-marketplace/community-operators-bql48"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.942025 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3ec6f18-f5f7-4fc3-ad28-8cd028459b52-utilities\") pod \"community-operators-bql48\" (UID: \"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52\") " pod="openshift-marketplace/community-operators-bql48"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.959385 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbtp5\" (UniqueName: \"kubernetes.io/projected/a3ec6f18-f5f7-4fc3-ad28-8cd028459b52-kube-api-access-qbtp5\") pod \"community-operators-bql48\" (UID: \"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52\") " pod="openshift-marketplace/community-operators-bql48"
Nov 24 15:23:08 crc kubenswrapper[5039]: I1124 15:23:08.994530 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bql48"
Nov 24 15:23:09 crc kubenswrapper[5039]: I1124 15:23:09.591478 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bql48"]
Nov 24 15:23:10 crc kubenswrapper[5039]: I1124 15:23:10.113705 5039 generic.go:334] "Generic (PLEG): container finished" podID="a3ec6f18-f5f7-4fc3-ad28-8cd028459b52" containerID="7a7ed4ec97fa3fe38858c6fd033607fcf3eb28e0b05b8ed51f25609c20eee193" exitCode=0
Nov 24 15:23:10 crc kubenswrapper[5039]: I1124 15:23:10.113813 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bql48" event={"ID":"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52","Type":"ContainerDied","Data":"7a7ed4ec97fa3fe38858c6fd033607fcf3eb28e0b05b8ed51f25609c20eee193"}
Nov 24 15:23:10 crc kubenswrapper[5039]: I1124 15:23:10.114118 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bql48" event={"ID":"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52","Type":"ContainerStarted","Data":"725c97cc98e5fcd84bb570676e35447a9579ffc6e08570f5a4cfe915acd538df"}
Nov 24 15:23:10 crc kubenswrapper[5039]: I1124 15:23:10.117597 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 24 15:23:11 crc kubenswrapper[5039]: I1124 15:23:11.130051 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bql48" event={"ID":"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52","Type":"ContainerStarted","Data":"66d6602fd80785d21232041a1e3ce588e0a878d9ed984b4531e1d260c81cb1d1"}
Nov 24 15:23:13 crc kubenswrapper[5039]: I1124 15:23:13.159298 5039 generic.go:334] "Generic (PLEG): container finished" podID="a3ec6f18-f5f7-4fc3-ad28-8cd028459b52" containerID="66d6602fd80785d21232041a1e3ce588e0a878d9ed984b4531e1d260c81cb1d1" exitCode=0
Nov 24 15:23:13 crc kubenswrapper[5039]: I1124 15:23:13.159392 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bql48" event={"ID":"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52","Type":"ContainerDied","Data":"66d6602fd80785d21232041a1e3ce588e0a878d9ed984b4531e1d260c81cb1d1"}
Nov 24 15:23:14 crc kubenswrapper[5039]: I1124 15:23:14.177401 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bql48" event={"ID":"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52","Type":"ContainerStarted","Data":"3bdde357ab9cd3b8409e91a4f3ee02f91bc49098ed21b0f945c9408feb095aab"}
Nov 24 15:23:14 crc kubenswrapper[5039]: I1124 15:23:14.210985 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bql48" podStartSLOduration=2.717799154 podStartE2EDuration="6.210963204s" podCreationTimestamp="2025-11-24 15:23:08 +0000 UTC" firstStartedPulling="2025-11-24 15:23:10.117108595 +0000 UTC m=+7502.556233125" lastFinishedPulling="2025-11-24 15:23:13.610272675 +0000 UTC m=+7506.049397175" observedRunningTime="2025-11-24 15:23:14.203764978 +0000 UTC m=+7506.642889488" watchObservedRunningTime="2025-11-24 15:23:14.210963204 +0000 UTC m=+7506.650087704"
Nov 24 15:23:18 crc kubenswrapper[5039]: I1124 15:23:18.995336 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bql48"
Nov 24 15:23:18 crc kubenswrapper[5039]: I1124 15:23:18.995828 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status=""
pod="openshift-marketplace/community-operators-bql48" Nov 24 15:23:19 crc kubenswrapper[5039]: I1124 15:23:19.067929 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bql48" Nov 24 15:23:19 crc kubenswrapper[5039]: I1124 15:23:19.315718 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bql48" Nov 24 15:23:19 crc kubenswrapper[5039]: I1124 15:23:19.389431 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bql48"] Nov 24 15:23:20 crc kubenswrapper[5039]: I1124 15:23:20.102758 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 15:23:20 crc kubenswrapper[5039]: I1124 15:23:20.102847 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 15:23:21 crc kubenswrapper[5039]: I1124 15:23:21.267041 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bql48" podUID="a3ec6f18-f5f7-4fc3-ad28-8cd028459b52" containerName="registry-server" containerID="cri-o://3bdde357ab9cd3b8409e91a4f3ee02f91bc49098ed21b0f945c9408feb095aab" gracePeriod=2 Nov 24 15:23:22 crc kubenswrapper[5039]: I1124 15:23:22.280169 5039 generic.go:334] "Generic (PLEG): container finished" podID="a3ec6f18-f5f7-4fc3-ad28-8cd028459b52" containerID="3bdde357ab9cd3b8409e91a4f3ee02f91bc49098ed21b0f945c9408feb095aab" exitCode=0 Nov 24 15:23:22 crc kubenswrapper[5039]: I1124 15:23:22.280463 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bql48" event={"ID":"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52","Type":"ContainerDied","Data":"3bdde357ab9cd3b8409e91a4f3ee02f91bc49098ed21b0f945c9408feb095aab"} Nov 24 15:23:22 crc kubenswrapper[5039]: I1124 15:23:22.280536 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bql48" event={"ID":"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52","Type":"ContainerDied","Data":"725c97cc98e5fcd84bb570676e35447a9579ffc6e08570f5a4cfe915acd538df"} Nov 24 15:23:22 crc kubenswrapper[5039]: I1124 15:23:22.280554 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="725c97cc98e5fcd84bb570676e35447a9579ffc6e08570f5a4cfe915acd538df" Nov 24 15:23:22 crc kubenswrapper[5039]: I1124 15:23:22.398684 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bql48" Nov 24 15:23:22 crc kubenswrapper[5039]: I1124 15:23:22.470829 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3ec6f18-f5f7-4fc3-ad28-8cd028459b52-utilities\") pod \"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52\" (UID: \"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52\") " Nov 24 15:23:22 crc kubenswrapper[5039]: I1124 15:23:22.471205 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbtp5\" (UniqueName: \"kubernetes.io/projected/a3ec6f18-f5f7-4fc3-ad28-8cd028459b52-kube-api-access-qbtp5\") pod \"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52\" (UID: \"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52\") " Nov 24 15:23:22 crc kubenswrapper[5039]: I1124 15:23:22.471470 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3ec6f18-f5f7-4fc3-ad28-8cd028459b52-catalog-content\") pod \"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52\" (UID: \"a3ec6f18-f5f7-4fc3-ad28-8cd028459b52\") " Nov 24 15:23:22 crc kubenswrapper[5039]: I1124 15:23:22.472237 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3ec6f18-f5f7-4fc3-ad28-8cd028459b52-utilities" (OuterVolumeSpecName: "utilities") pod "a3ec6f18-f5f7-4fc3-ad28-8cd028459b52" (UID: "a3ec6f18-f5f7-4fc3-ad28-8cd028459b52"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 15:23:22 crc kubenswrapper[5039]: I1124 15:23:22.472790 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3ec6f18-f5f7-4fc3-ad28-8cd028459b52-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 15:23:22 crc kubenswrapper[5039]: I1124 15:23:22.479639 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3ec6f18-f5f7-4fc3-ad28-8cd028459b52-kube-api-access-qbtp5" (OuterVolumeSpecName: "kube-api-access-qbtp5") pod "a3ec6f18-f5f7-4fc3-ad28-8cd028459b52" (UID: "a3ec6f18-f5f7-4fc3-ad28-8cd028459b52"). InnerVolumeSpecName "kube-api-access-qbtp5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 15:23:22 crc kubenswrapper[5039]: I1124 15:23:22.521604 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3ec6f18-f5f7-4fc3-ad28-8cd028459b52-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a3ec6f18-f5f7-4fc3-ad28-8cd028459b52" (UID: "a3ec6f18-f5f7-4fc3-ad28-8cd028459b52"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 15:23:22 crc kubenswrapper[5039]: I1124 15:23:22.575168 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3ec6f18-f5f7-4fc3-ad28-8cd028459b52-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 15:23:22 crc kubenswrapper[5039]: I1124 15:23:22.575196 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbtp5\" (UniqueName: \"kubernetes.io/projected/a3ec6f18-f5f7-4fc3-ad28-8cd028459b52-kube-api-access-qbtp5\") on node \"crc\" DevicePath \"\"" Nov 24 15:23:23 crc kubenswrapper[5039]: I1124 15:23:23.289102 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bql48" Nov 24 15:23:23 crc kubenswrapper[5039]: I1124 15:23:23.341789 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bql48"] Nov 24 15:23:23 crc kubenswrapper[5039]: I1124 15:23:23.353932 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bql48"] Nov 24 15:23:24 crc kubenswrapper[5039]: I1124 15:23:24.317055 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3ec6f18-f5f7-4fc3-ad28-8cd028459b52" path="/var/lib/kubelet/pods/a3ec6f18-f5f7-4fc3-ad28-8cd028459b52/volumes" Nov 24 15:23:50 crc kubenswrapper[5039]: I1124 15:23:50.101262 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 15:23:50 crc kubenswrapper[5039]: I1124 15:23:50.101927 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 15:23:50 crc kubenswrapper[5039]: I1124 15:23:50.101979 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 15:23:50 crc kubenswrapper[5039]: I1124 15:23:50.102941 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c63b161b15ffbf31f7bd32b6256206164578c32ffa789a8c58aea4157b4a1605"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 15:23:50 crc kubenswrapper[5039]: I1124 15:23:50.103010 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://c63b161b15ffbf31f7bd32b6256206164578c32ffa789a8c58aea4157b4a1605" gracePeriod=600 Nov 24 15:23:50 crc kubenswrapper[5039]: I1124 15:23:50.647747 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="c63b161b15ffbf31f7bd32b6256206164578c32ffa789a8c58aea4157b4a1605" exitCode=0 Nov 24 15:23:50 crc kubenswrapper[5039]: I1124 15:23:50.648357 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"c63b161b15ffbf31f7bd32b6256206164578c32ffa789a8c58aea4157b4a1605"} Nov 24 15:23:50 crc kubenswrapper[5039]: I1124 15:23:50.648615 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7"} Nov 24 15:23:50 crc kubenswrapper[5039]: I1124 15:23:50.648647 5039 scope.go:117] "RemoveContainer" 
containerID="47c5fb9742bc004378188c1c0fcb91b27bea9291b53c98e103838b752d85dbc3" Nov 24 15:25:01 crc kubenswrapper[5039]: I1124 15:25:01.510040 5039 scope.go:117] "RemoveContainer" containerID="9a39f149599b22aa451a68994470a48f8020a52cddb9a363b0cd380578262fe0" Nov 24 15:25:01 crc kubenswrapper[5039]: I1124 15:25:01.540585 5039 scope.go:117] "RemoveContainer" containerID="c3e62ef071581329751726008c04f0b7b6ef99c40ec9254ccd8e9eefc369bd84" Nov 24 15:25:01 crc kubenswrapper[5039]: I1124 15:25:01.633597 5039 scope.go:117] "RemoveContainer" containerID="abc3f2233be19eafe7cfabeeeb3e5863837aeaf1b399edba17252495a634e88a" Nov 24 15:25:45 crc kubenswrapper[5039]: I1124 15:25:45.954322 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-k88gt/must-gather-wtzc2"] Nov 24 15:25:45 crc kubenswrapper[5039]: E1124 15:25:45.955190 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3ec6f18-f5f7-4fc3-ad28-8cd028459b52" containerName="extract-utilities" Nov 24 15:25:45 crc kubenswrapper[5039]: I1124 15:25:45.955203 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3ec6f18-f5f7-4fc3-ad28-8cd028459b52" containerName="extract-utilities" Nov 24 15:25:45 crc kubenswrapper[5039]: E1124 15:25:45.955245 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3ec6f18-f5f7-4fc3-ad28-8cd028459b52" containerName="extract-content" Nov 24 15:25:45 crc kubenswrapper[5039]: I1124 15:25:45.955252 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3ec6f18-f5f7-4fc3-ad28-8cd028459b52" containerName="extract-content" Nov 24 15:25:45 crc kubenswrapper[5039]: E1124 15:25:45.955267 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3ec6f18-f5f7-4fc3-ad28-8cd028459b52" containerName="registry-server" Nov 24 15:25:45 crc kubenswrapper[5039]: I1124 15:25:45.955273 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3ec6f18-f5f7-4fc3-ad28-8cd028459b52" containerName="registry-server" Nov 24 15:25:45 crc kubenswrapper[5039]: I1124 15:25:45.955481 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3ec6f18-f5f7-4fc3-ad28-8cd028459b52" containerName="registry-server" Nov 24 15:25:45 crc kubenswrapper[5039]: I1124 15:25:45.956655 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-k88gt/must-gather-wtzc2" Nov 24 15:25:45 crc kubenswrapper[5039]: I1124 15:25:45.959900 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-k88gt"/"kube-root-ca.crt" Nov 24 15:25:45 crc kubenswrapper[5039]: I1124 15:25:45.960114 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-k88gt"/"openshift-service-ca.crt" Nov 24 15:25:45 crc kubenswrapper[5039]: I1124 15:25:45.973760 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-k88gt/must-gather-wtzc2"] Nov 24 15:25:46 crc kubenswrapper[5039]: I1124 15:25:46.055525 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cq46j\" (UniqueName: \"kubernetes.io/projected/fd30c717-0e76-4e6f-bcc3-e48a24658894-kube-api-access-cq46j\") pod \"must-gather-wtzc2\" (UID: \"fd30c717-0e76-4e6f-bcc3-e48a24658894\") " pod="openshift-must-gather-k88gt/must-gather-wtzc2" Nov 24 15:25:46 crc kubenswrapper[5039]: I1124 15:25:46.055876 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/fd30c717-0e76-4e6f-bcc3-e48a24658894-must-gather-output\") pod \"must-gather-wtzc2\" (UID: \"fd30c717-0e76-4e6f-bcc3-e48a24658894\") " pod="openshift-must-gather-k88gt/must-gather-wtzc2" Nov 24 15:25:46 crc kubenswrapper[5039]: I1124 15:25:46.157759 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cq46j\" (UniqueName: \"kubernetes.io/projected/fd30c717-0e76-4e6f-bcc3-e48a24658894-kube-api-access-cq46j\") pod \"must-gather-wtzc2\" (UID: \"fd30c717-0e76-4e6f-bcc3-e48a24658894\") " pod="openshift-must-gather-k88gt/must-gather-wtzc2" Nov 24 15:25:46 crc kubenswrapper[5039]: I1124 15:25:46.157877 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/fd30c717-0e76-4e6f-bcc3-e48a24658894-must-gather-output\") pod \"must-gather-wtzc2\" (UID: \"fd30c717-0e76-4e6f-bcc3-e48a24658894\") " pod="openshift-must-gather-k88gt/must-gather-wtzc2" Nov 24 15:25:46 crc kubenswrapper[5039]: I1124 15:25:46.158429 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/fd30c717-0e76-4e6f-bcc3-e48a24658894-must-gather-output\") pod \"must-gather-wtzc2\" (UID: \"fd30c717-0e76-4e6f-bcc3-e48a24658894\") " pod="openshift-must-gather-k88gt/must-gather-wtzc2" Nov 24 15:25:46 crc kubenswrapper[5039]: I1124 15:25:46.194997 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cq46j\" (UniqueName: \"kubernetes.io/projected/fd30c717-0e76-4e6f-bcc3-e48a24658894-kube-api-access-cq46j\") pod \"must-gather-wtzc2\" (UID: \"fd30c717-0e76-4e6f-bcc3-e48a24658894\") " pod="openshift-must-gather-k88gt/must-gather-wtzc2" Nov 24 15:25:46 crc kubenswrapper[5039]: I1124 15:25:46.277962 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-k88gt/must-gather-wtzc2" Nov 24 15:25:46 crc kubenswrapper[5039]: I1124 15:25:46.801392 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-k88gt/must-gather-wtzc2"] Nov 24 15:25:46 crc kubenswrapper[5039]: W1124 15:25:46.817791 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfd30c717_0e76_4e6f_bcc3_e48a24658894.slice/crio-acb0f6ae9d8cf6d51eaf592df648237feb76219232ab4053ccd183c2617f1147 WatchSource:0}: Error finding container acb0f6ae9d8cf6d51eaf592df648237feb76219232ab4053ccd183c2617f1147: Status 404 returned error can't find the container with id acb0f6ae9d8cf6d51eaf592df648237feb76219232ab4053ccd183c2617f1147 Nov 24 15:25:47 crc kubenswrapper[5039]: I1124 15:25:47.277934 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-k88gt/must-gather-wtzc2" event={"ID":"fd30c717-0e76-4e6f-bcc3-e48a24658894","Type":"ContainerStarted","Data":"a6ffa8f2dca749e749f1c08c2a067b9f9760149697cab4558a97327536b75575"} Nov 24 15:25:47 crc kubenswrapper[5039]: I1124 15:25:47.278186 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-k88gt/must-gather-wtzc2" event={"ID":"fd30c717-0e76-4e6f-bcc3-e48a24658894","Type":"ContainerStarted","Data":"acb0f6ae9d8cf6d51eaf592df648237feb76219232ab4053ccd183c2617f1147"} Nov 24 15:25:48 crc kubenswrapper[5039]: I1124 15:25:48.297060 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-k88gt/must-gather-wtzc2" event={"ID":"fd30c717-0e76-4e6f-bcc3-e48a24658894","Type":"ContainerStarted","Data":"50e62532f0df608b90adcb17c9f88f289586fb2d7008c7cf4d47d150c5617038"} Nov 24 15:25:48 crc kubenswrapper[5039]: I1124 15:25:48.331391 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-k88gt/must-gather-wtzc2" podStartSLOduration=3.331369889 podStartE2EDuration="3.331369889s" podCreationTimestamp="2025-11-24 15:25:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 15:25:48.313220396 +0000 UTC m=+7660.752344926" watchObservedRunningTime="2025-11-24 15:25:48.331369889 +0000 UTC m=+7660.770494399" Nov 24 15:25:50 crc kubenswrapper[5039]: I1124 15:25:50.101755 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 15:25:50 crc kubenswrapper[5039]: I1124 15:25:50.102468 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 15:25:50 crc kubenswrapper[5039]: E1124 15:25:50.594022 5039 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.175:52560->38.102.83.175:41425: read tcp 38.102.83.175:52560->38.102.83.175:41425: read: connection reset by peer Nov 24 15:25:51 crc kubenswrapper[5039]: I1124 15:25:51.295026 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-k88gt/crc-debug-xwcqq"] Nov 24 15:25:51 crc kubenswrapper[5039]: 
I1124 15:25:51.296624 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-k88gt/crc-debug-xwcqq" Nov 24 15:25:51 crc kubenswrapper[5039]: I1124 15:25:51.299272 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-k88gt"/"default-dockercfg-l5zfw" Nov 24 15:25:51 crc kubenswrapper[5039]: I1124 15:25:51.380925 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4lcj\" (UniqueName: \"kubernetes.io/projected/6358a0d5-9730-4e25-82aa-24ed4182f008-kube-api-access-r4lcj\") pod \"crc-debug-xwcqq\" (UID: \"6358a0d5-9730-4e25-82aa-24ed4182f008\") " pod="openshift-must-gather-k88gt/crc-debug-xwcqq" Nov 24 15:25:51 crc kubenswrapper[5039]: I1124 15:25:51.381614 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6358a0d5-9730-4e25-82aa-24ed4182f008-host\") pod \"crc-debug-xwcqq\" (UID: \"6358a0d5-9730-4e25-82aa-24ed4182f008\") " pod="openshift-must-gather-k88gt/crc-debug-xwcqq" Nov 24 15:25:51 crc kubenswrapper[5039]: I1124 15:25:51.483656 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6358a0d5-9730-4e25-82aa-24ed4182f008-host\") pod \"crc-debug-xwcqq\" (UID: \"6358a0d5-9730-4e25-82aa-24ed4182f008\") " pod="openshift-must-gather-k88gt/crc-debug-xwcqq" Nov 24 15:25:51 crc kubenswrapper[5039]: I1124 15:25:51.484095 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4lcj\" (UniqueName: \"kubernetes.io/projected/6358a0d5-9730-4e25-82aa-24ed4182f008-kube-api-access-r4lcj\") pod \"crc-debug-xwcqq\" (UID: \"6358a0d5-9730-4e25-82aa-24ed4182f008\") " pod="openshift-must-gather-k88gt/crc-debug-xwcqq" Nov 24 15:25:51 crc kubenswrapper[5039]: I1124 15:25:51.483880 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6358a0d5-9730-4e25-82aa-24ed4182f008-host\") pod \"crc-debug-xwcqq\" (UID: \"6358a0d5-9730-4e25-82aa-24ed4182f008\") " pod="openshift-must-gather-k88gt/crc-debug-xwcqq" Nov 24 15:25:51 crc kubenswrapper[5039]: I1124 15:25:51.508180 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4lcj\" (UniqueName: \"kubernetes.io/projected/6358a0d5-9730-4e25-82aa-24ed4182f008-kube-api-access-r4lcj\") pod \"crc-debug-xwcqq\" (UID: \"6358a0d5-9730-4e25-82aa-24ed4182f008\") " pod="openshift-must-gather-k88gt/crc-debug-xwcqq" Nov 24 15:25:51 crc kubenswrapper[5039]: I1124 15:25:51.614704 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-k88gt/crc-debug-xwcqq" Nov 24 15:25:51 crc kubenswrapper[5039]: W1124 15:25:51.664191 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6358a0d5_9730_4e25_82aa_24ed4182f008.slice/crio-35e06988a0aa1474e49f989978890a832f0f6892f2260e563253421d7ac572b3 WatchSource:0}: Error finding container 35e06988a0aa1474e49f989978890a832f0f6892f2260e563253421d7ac572b3: Status 404 returned error can't find the container with id 35e06988a0aa1474e49f989978890a832f0f6892f2260e563253421d7ac572b3 Nov 24 15:25:52 crc kubenswrapper[5039]: I1124 15:25:52.357582 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-k88gt/crc-debug-xwcqq" event={"ID":"6358a0d5-9730-4e25-82aa-24ed4182f008","Type":"ContainerStarted","Data":"930d6972542c2cdf0b853e94af05afa297457c2b8656e0a59e1adfa4b5b33a0c"} Nov 24 15:25:52 crc kubenswrapper[5039]: I1124 15:25:52.358004 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-k88gt/crc-debug-xwcqq" event={"ID":"6358a0d5-9730-4e25-82aa-24ed4182f008","Type":"ContainerStarted","Data":"35e06988a0aa1474e49f989978890a832f0f6892f2260e563253421d7ac572b3"} Nov 24 15:25:52 crc kubenswrapper[5039]: I1124 15:25:52.370842 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-k88gt/crc-debug-xwcqq" podStartSLOduration=1.370826879 podStartE2EDuration="1.370826879s" podCreationTimestamp="2025-11-24 15:25:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 15:25:52.36963962 +0000 UTC m=+7664.808764120" watchObservedRunningTime="2025-11-24 15:25:52.370826879 +0000 UTC m=+7664.809951379" Nov 24 15:26:20 crc kubenswrapper[5039]: I1124 15:26:20.101620 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 15:26:20 crc kubenswrapper[5039]: I1124 15:26:20.102249 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 15:26:32 crc kubenswrapper[5039]: I1124 15:26:32.796447 5039 generic.go:334] "Generic (PLEG): container finished" podID="6358a0d5-9730-4e25-82aa-24ed4182f008" containerID="930d6972542c2cdf0b853e94af05afa297457c2b8656e0a59e1adfa4b5b33a0c" exitCode=0 Nov 24 15:26:32 crc kubenswrapper[5039]: I1124 15:26:32.796538 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-k88gt/crc-debug-xwcqq" event={"ID":"6358a0d5-9730-4e25-82aa-24ed4182f008","Type":"ContainerDied","Data":"930d6972542c2cdf0b853e94af05afa297457c2b8656e0a59e1adfa4b5b33a0c"} Nov 24 15:26:33 crc kubenswrapper[5039]: I1124 15:26:33.959835 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-k88gt/crc-debug-xwcqq" Nov 24 15:26:33 crc kubenswrapper[5039]: I1124 15:26:33.993136 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-k88gt/crc-debug-xwcqq"] Nov 24 15:26:34 crc kubenswrapper[5039]: I1124 15:26:34.002468 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-k88gt/crc-debug-xwcqq"] Nov 24 15:26:34 crc kubenswrapper[5039]: I1124 15:26:34.107366 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r4lcj\" (UniqueName: \"kubernetes.io/projected/6358a0d5-9730-4e25-82aa-24ed4182f008-kube-api-access-r4lcj\") pod \"6358a0d5-9730-4e25-82aa-24ed4182f008\" (UID: \"6358a0d5-9730-4e25-82aa-24ed4182f008\") " Nov 24 15:26:34 crc kubenswrapper[5039]: I1124 15:26:34.107531 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6358a0d5-9730-4e25-82aa-24ed4182f008-host\") pod \"6358a0d5-9730-4e25-82aa-24ed4182f008\" (UID: \"6358a0d5-9730-4e25-82aa-24ed4182f008\") " Nov 24 15:26:34 crc kubenswrapper[5039]: I1124 15:26:34.108310 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6358a0d5-9730-4e25-82aa-24ed4182f008-host" (OuterVolumeSpecName: "host") pod "6358a0d5-9730-4e25-82aa-24ed4182f008" (UID: "6358a0d5-9730-4e25-82aa-24ed4182f008"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 15:26:34 crc kubenswrapper[5039]: I1124 15:26:34.114122 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6358a0d5-9730-4e25-82aa-24ed4182f008-kube-api-access-r4lcj" (OuterVolumeSpecName: "kube-api-access-r4lcj") pod "6358a0d5-9730-4e25-82aa-24ed4182f008" (UID: "6358a0d5-9730-4e25-82aa-24ed4182f008"). InnerVolumeSpecName "kube-api-access-r4lcj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 15:26:34 crc kubenswrapper[5039]: I1124 15:26:34.210671 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r4lcj\" (UniqueName: \"kubernetes.io/projected/6358a0d5-9730-4e25-82aa-24ed4182f008-kube-api-access-r4lcj\") on node \"crc\" DevicePath \"\"" Nov 24 15:26:34 crc kubenswrapper[5039]: I1124 15:26:34.210715 5039 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6358a0d5-9730-4e25-82aa-24ed4182f008-host\") on node \"crc\" DevicePath \"\"" Nov 24 15:26:34 crc kubenswrapper[5039]: I1124 15:26:34.322754 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6358a0d5-9730-4e25-82aa-24ed4182f008" path="/var/lib/kubelet/pods/6358a0d5-9730-4e25-82aa-24ed4182f008/volumes" Nov 24 15:26:34 crc kubenswrapper[5039]: I1124 15:26:34.818517 5039 scope.go:117] "RemoveContainer" containerID="930d6972542c2cdf0b853e94af05afa297457c2b8656e0a59e1adfa4b5b33a0c" Nov 24 15:26:34 crc kubenswrapper[5039]: I1124 15:26:34.818621 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-k88gt/crc-debug-xwcqq" Nov 24 15:26:35 crc kubenswrapper[5039]: I1124 15:26:35.189330 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-k88gt/crc-debug-gpn75"] Nov 24 15:26:35 crc kubenswrapper[5039]: E1124 15:26:35.189911 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6358a0d5-9730-4e25-82aa-24ed4182f008" containerName="container-00" Nov 24 15:26:35 crc kubenswrapper[5039]: I1124 15:26:35.189940 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="6358a0d5-9730-4e25-82aa-24ed4182f008" containerName="container-00" Nov 24 15:26:35 crc kubenswrapper[5039]: I1124 15:26:35.190267 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="6358a0d5-9730-4e25-82aa-24ed4182f008" containerName="container-00" Nov 24 15:26:35 crc kubenswrapper[5039]: I1124 15:26:35.191219 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-k88gt/crc-debug-gpn75" Nov 24 15:26:35 crc kubenswrapper[5039]: I1124 15:26:35.192651 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-k88gt"/"default-dockercfg-l5zfw" Nov 24 15:26:35 crc kubenswrapper[5039]: I1124 15:26:35.233331 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0fb89b9e-e200-48d9-82e2-cfa2c2466773-host\") pod \"crc-debug-gpn75\" (UID: \"0fb89b9e-e200-48d9-82e2-cfa2c2466773\") " pod="openshift-must-gather-k88gt/crc-debug-gpn75" Nov 24 15:26:35 crc kubenswrapper[5039]: I1124 15:26:35.233465 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7szx\" (UniqueName: \"kubernetes.io/projected/0fb89b9e-e200-48d9-82e2-cfa2c2466773-kube-api-access-z7szx\") pod \"crc-debug-gpn75\" (UID: \"0fb89b9e-e200-48d9-82e2-cfa2c2466773\") " pod="openshift-must-gather-k88gt/crc-debug-gpn75" Nov 24 15:26:35 crc kubenswrapper[5039]: I1124 15:26:35.334893 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7szx\" (UniqueName: \"kubernetes.io/projected/0fb89b9e-e200-48d9-82e2-cfa2c2466773-kube-api-access-z7szx\") pod \"crc-debug-gpn75\" (UID: \"0fb89b9e-e200-48d9-82e2-cfa2c2466773\") " pod="openshift-must-gather-k88gt/crc-debug-gpn75" Nov 24 15:26:35 crc kubenswrapper[5039]: I1124 15:26:35.335379 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0fb89b9e-e200-48d9-82e2-cfa2c2466773-host\") pod \"crc-debug-gpn75\" (UID: \"0fb89b9e-e200-48d9-82e2-cfa2c2466773\") " pod="openshift-must-gather-k88gt/crc-debug-gpn75" Nov 24 15:26:35 crc kubenswrapper[5039]: I1124 15:26:35.336008 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0fb89b9e-e200-48d9-82e2-cfa2c2466773-host\") pod \"crc-debug-gpn75\" (UID: \"0fb89b9e-e200-48d9-82e2-cfa2c2466773\") " pod="openshift-must-gather-k88gt/crc-debug-gpn75" Nov 24 15:26:35 crc kubenswrapper[5039]: I1124 15:26:35.352419 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7szx\" (UniqueName: \"kubernetes.io/projected/0fb89b9e-e200-48d9-82e2-cfa2c2466773-kube-api-access-z7szx\") pod \"crc-debug-gpn75\" (UID: \"0fb89b9e-e200-48d9-82e2-cfa2c2466773\") " pod="openshift-must-gather-k88gt/crc-debug-gpn75" Nov 24 15:26:35 crc kubenswrapper[5039]: I1124 
15:26:35.510361 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-k88gt/crc-debug-gpn75" Nov 24 15:26:35 crc kubenswrapper[5039]: I1124 15:26:35.850066 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-k88gt/crc-debug-gpn75" event={"ID":"0fb89b9e-e200-48d9-82e2-cfa2c2466773","Type":"ContainerStarted","Data":"0c82ae1c3952e84223446c6c82478764afcdad3a1e1c3011508933b808dae822"} Nov 24 15:26:36 crc kubenswrapper[5039]: E1124 15:26:36.073488 5039 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0fb89b9e_e200_48d9_82e2_cfa2c2466773.slice/crio-af8704e96204502a5a549f9c92037e1323f761545ec2d472bcacfcc7d1e27394.scope\": RecentStats: unable to find data in memory cache]" Nov 24 15:26:36 crc kubenswrapper[5039]: I1124 15:26:36.861766 5039 generic.go:334] "Generic (PLEG): container finished" podID="0fb89b9e-e200-48d9-82e2-cfa2c2466773" containerID="af8704e96204502a5a549f9c92037e1323f761545ec2d472bcacfcc7d1e27394" exitCode=0 Nov 24 15:26:36 crc kubenswrapper[5039]: I1124 15:26:36.861841 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-k88gt/crc-debug-gpn75" event={"ID":"0fb89b9e-e200-48d9-82e2-cfa2c2466773","Type":"ContainerDied","Data":"af8704e96204502a5a549f9c92037e1323f761545ec2d472bcacfcc7d1e27394"} Nov 24 15:26:38 crc kubenswrapper[5039]: I1124 15:26:38.005838 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-k88gt/crc-debug-gpn75" Nov 24 15:26:38 crc kubenswrapper[5039]: I1124 15:26:38.196174 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z7szx\" (UniqueName: \"kubernetes.io/projected/0fb89b9e-e200-48d9-82e2-cfa2c2466773-kube-api-access-z7szx\") pod \"0fb89b9e-e200-48d9-82e2-cfa2c2466773\" (UID: \"0fb89b9e-e200-48d9-82e2-cfa2c2466773\") " Nov 24 15:26:38 crc kubenswrapper[5039]: I1124 15:26:38.196307 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0fb89b9e-e200-48d9-82e2-cfa2c2466773-host\") pod \"0fb89b9e-e200-48d9-82e2-cfa2c2466773\" (UID: \"0fb89b9e-e200-48d9-82e2-cfa2c2466773\") " Nov 24 15:26:38 crc kubenswrapper[5039]: I1124 15:26:38.196418 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0fb89b9e-e200-48d9-82e2-cfa2c2466773-host" (OuterVolumeSpecName: "host") pod "0fb89b9e-e200-48d9-82e2-cfa2c2466773" (UID: "0fb89b9e-e200-48d9-82e2-cfa2c2466773"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 15:26:38 crc kubenswrapper[5039]: I1124 15:26:38.196991 5039 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0fb89b9e-e200-48d9-82e2-cfa2c2466773-host\") on node \"crc\" DevicePath \"\"" Nov 24 15:26:38 crc kubenswrapper[5039]: I1124 15:26:38.202060 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0fb89b9e-e200-48d9-82e2-cfa2c2466773-kube-api-access-z7szx" (OuterVolumeSpecName: "kube-api-access-z7szx") pod "0fb89b9e-e200-48d9-82e2-cfa2c2466773" (UID: "0fb89b9e-e200-48d9-82e2-cfa2c2466773"). InnerVolumeSpecName "kube-api-access-z7szx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 15:26:38 crc kubenswrapper[5039]: I1124 15:26:38.299265 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z7szx\" (UniqueName: \"kubernetes.io/projected/0fb89b9e-e200-48d9-82e2-cfa2c2466773-kube-api-access-z7szx\") on node \"crc\" DevicePath \"\"" Nov 24 15:26:38 crc kubenswrapper[5039]: I1124 15:26:38.880089 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-k88gt/crc-debug-gpn75" event={"ID":"0fb89b9e-e200-48d9-82e2-cfa2c2466773","Type":"ContainerDied","Data":"0c82ae1c3952e84223446c6c82478764afcdad3a1e1c3011508933b808dae822"} Nov 24 15:26:38 crc kubenswrapper[5039]: I1124 15:26:38.880130 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0c82ae1c3952e84223446c6c82478764afcdad3a1e1c3011508933b808dae822" Nov 24 15:26:38 crc kubenswrapper[5039]: I1124 15:26:38.880134 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-k88gt/crc-debug-gpn75" Nov 24 15:26:39 crc kubenswrapper[5039]: I1124 15:26:39.143243 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-k88gt/crc-debug-gpn75"] Nov 24 15:26:39 crc kubenswrapper[5039]: I1124 15:26:39.152826 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-k88gt/crc-debug-gpn75"] Nov 24 15:26:40 crc kubenswrapper[5039]: I1124 15:26:40.327003 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0fb89b9e-e200-48d9-82e2-cfa2c2466773" path="/var/lib/kubelet/pods/0fb89b9e-e200-48d9-82e2-cfa2c2466773/volumes" Nov 24 15:26:40 crc kubenswrapper[5039]: I1124 15:26:40.408933 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-k88gt/crc-debug-m2xkf"] Nov 24 15:26:40 crc kubenswrapper[5039]: E1124 15:26:40.409422 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fb89b9e-e200-48d9-82e2-cfa2c2466773" containerName="container-00" Nov 24 15:26:40 crc kubenswrapper[5039]: I1124 15:26:40.409437 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fb89b9e-e200-48d9-82e2-cfa2c2466773" containerName="container-00" Nov 24 15:26:40 crc kubenswrapper[5039]: I1124 15:26:40.409654 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fb89b9e-e200-48d9-82e2-cfa2c2466773" containerName="container-00" Nov 24 15:26:40 crc kubenswrapper[5039]: I1124 15:26:40.425150 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-k88gt/crc-debug-m2xkf" Nov 24 15:26:40 crc kubenswrapper[5039]: I1124 15:26:40.430054 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-k88gt"/"default-dockercfg-l5zfw" Nov 24 15:26:40 crc kubenswrapper[5039]: I1124 15:26:40.554618 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmvpj\" (UniqueName: \"kubernetes.io/projected/5f4aba86-b2f3-467f-90f3-1e88361d2893-kube-api-access-nmvpj\") pod \"crc-debug-m2xkf\" (UID: \"5f4aba86-b2f3-467f-90f3-1e88361d2893\") " pod="openshift-must-gather-k88gt/crc-debug-m2xkf" Nov 24 15:26:40 crc kubenswrapper[5039]: I1124 15:26:40.554700 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5f4aba86-b2f3-467f-90f3-1e88361d2893-host\") pod \"crc-debug-m2xkf\" (UID: \"5f4aba86-b2f3-467f-90f3-1e88361d2893\") " pod="openshift-must-gather-k88gt/crc-debug-m2xkf" Nov 24 15:26:40 crc kubenswrapper[5039]: I1124 15:26:40.657479 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmvpj\" (UniqueName: \"kubernetes.io/projected/5f4aba86-b2f3-467f-90f3-1e88361d2893-kube-api-access-nmvpj\") pod \"crc-debug-m2xkf\" (UID: \"5f4aba86-b2f3-467f-90f3-1e88361d2893\") " pod="openshift-must-gather-k88gt/crc-debug-m2xkf" Nov 24 15:26:40 crc kubenswrapper[5039]: I1124 15:26:40.657602 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5f4aba86-b2f3-467f-90f3-1e88361d2893-host\") pod \"crc-debug-m2xkf\" (UID: \"5f4aba86-b2f3-467f-90f3-1e88361d2893\") " pod="openshift-must-gather-k88gt/crc-debug-m2xkf" Nov 24 15:26:40 crc kubenswrapper[5039]: I1124 15:26:40.657774 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5f4aba86-b2f3-467f-90f3-1e88361d2893-host\") pod \"crc-debug-m2xkf\" (UID: \"5f4aba86-b2f3-467f-90f3-1e88361d2893\") " pod="openshift-must-gather-k88gt/crc-debug-m2xkf" Nov 24 15:26:40 crc kubenswrapper[5039]: I1124 15:26:40.679040 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmvpj\" (UniqueName: \"kubernetes.io/projected/5f4aba86-b2f3-467f-90f3-1e88361d2893-kube-api-access-nmvpj\") pod \"crc-debug-m2xkf\" (UID: \"5f4aba86-b2f3-467f-90f3-1e88361d2893\") " pod="openshift-must-gather-k88gt/crc-debug-m2xkf" Nov 24 15:26:40 crc kubenswrapper[5039]: I1124 15:26:40.755723 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-k88gt/crc-debug-m2xkf" Nov 24 15:26:40 crc kubenswrapper[5039]: I1124 15:26:40.913793 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-k88gt/crc-debug-m2xkf" event={"ID":"5f4aba86-b2f3-467f-90f3-1e88361d2893","Type":"ContainerStarted","Data":"11394685efc8862948db0e755a83c31156fdf13daf30c5142a666b3fc458e16e"} Nov 24 15:26:41 crc kubenswrapper[5039]: I1124 15:26:41.931609 5039 generic.go:334] "Generic (PLEG): container finished" podID="5f4aba86-b2f3-467f-90f3-1e88361d2893" containerID="dd32f6e6327d6ef2584c1e6ea3b05c5351ab890451075652e2089c11360ef29e" exitCode=0 Nov 24 15:26:41 crc kubenswrapper[5039]: I1124 15:26:41.931728 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-k88gt/crc-debug-m2xkf" event={"ID":"5f4aba86-b2f3-467f-90f3-1e88361d2893","Type":"ContainerDied","Data":"dd32f6e6327d6ef2584c1e6ea3b05c5351ab890451075652e2089c11360ef29e"} Nov 24 15:26:41 crc kubenswrapper[5039]: I1124 15:26:41.988646 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-k88gt/crc-debug-m2xkf"] Nov 24 15:26:42 crc kubenswrapper[5039]: I1124 15:26:42.002037 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-k88gt/crc-debug-m2xkf"] Nov 24 15:26:43 crc kubenswrapper[5039]: I1124 15:26:43.079936 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-k88gt/crc-debug-m2xkf" Nov 24 15:26:43 crc kubenswrapper[5039]: I1124 15:26:43.223436 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nmvpj\" (UniqueName: \"kubernetes.io/projected/5f4aba86-b2f3-467f-90f3-1e88361d2893-kube-api-access-nmvpj\") pod \"5f4aba86-b2f3-467f-90f3-1e88361d2893\" (UID: \"5f4aba86-b2f3-467f-90f3-1e88361d2893\") " Nov 24 15:26:43 crc kubenswrapper[5039]: I1124 15:26:43.223821 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5f4aba86-b2f3-467f-90f3-1e88361d2893-host\") pod \"5f4aba86-b2f3-467f-90f3-1e88361d2893\" (UID: \"5f4aba86-b2f3-467f-90f3-1e88361d2893\") " Nov 24 15:26:43 crc kubenswrapper[5039]: I1124 15:26:43.224019 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5f4aba86-b2f3-467f-90f3-1e88361d2893-host" (OuterVolumeSpecName: "host") pod "5f4aba86-b2f3-467f-90f3-1e88361d2893" (UID: "5f4aba86-b2f3-467f-90f3-1e88361d2893"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 15:26:43 crc kubenswrapper[5039]: I1124 15:26:43.224486 5039 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5f4aba86-b2f3-467f-90f3-1e88361d2893-host\") on node \"crc\" DevicePath \"\"" Nov 24 15:26:43 crc kubenswrapper[5039]: I1124 15:26:43.229714 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f4aba86-b2f3-467f-90f3-1e88361d2893-kube-api-access-nmvpj" (OuterVolumeSpecName: "kube-api-access-nmvpj") pod "5f4aba86-b2f3-467f-90f3-1e88361d2893" (UID: "5f4aba86-b2f3-467f-90f3-1e88361d2893"). InnerVolumeSpecName "kube-api-access-nmvpj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 15:26:43 crc kubenswrapper[5039]: I1124 15:26:43.326358 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nmvpj\" (UniqueName: \"kubernetes.io/projected/5f4aba86-b2f3-467f-90f3-1e88361d2893-kube-api-access-nmvpj\") on node \"crc\" DevicePath \"\"" Nov 24 15:26:43 crc kubenswrapper[5039]: I1124 15:26:43.953630 5039 scope.go:117] "RemoveContainer" containerID="dd32f6e6327d6ef2584c1e6ea3b05c5351ab890451075652e2089c11360ef29e" Nov 24 15:26:43 crc kubenswrapper[5039]: I1124 15:26:43.953755 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-k88gt/crc-debug-m2xkf" Nov 24 15:26:44 crc kubenswrapper[5039]: I1124 15:26:44.317710 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f4aba86-b2f3-467f-90f3-1e88361d2893" path="/var/lib/kubelet/pods/5f4aba86-b2f3-467f-90f3-1e88361d2893/volumes" Nov 24 15:26:50 crc kubenswrapper[5039]: I1124 15:26:50.101633 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 15:26:50 crc kubenswrapper[5039]: I1124 15:26:50.102242 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 15:26:50 crc kubenswrapper[5039]: I1124 15:26:50.102299 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 15:26:50 crc kubenswrapper[5039]: I1124 15:26:50.103340 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 15:26:50 crc kubenswrapper[5039]: I1124 15:26:50.103429 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" gracePeriod=600 Nov 24 15:26:50 crc kubenswrapper[5039]: E1124 15:26:50.225376 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:26:51 crc kubenswrapper[5039]: I1124 15:26:51.035289 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" exitCode=0 Nov 24 15:26:51 crc kubenswrapper[5039]: I1124 15:26:51.035363 
5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7"} Nov 24 15:26:51 crc kubenswrapper[5039]: I1124 15:26:51.035650 5039 scope.go:117] "RemoveContainer" containerID="c63b161b15ffbf31f7bd32b6256206164578c32ffa789a8c58aea4157b4a1605" Nov 24 15:26:51 crc kubenswrapper[5039]: I1124 15:26:51.036343 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:26:51 crc kubenswrapper[5039]: E1124 15:26:51.036633 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:27:03 crc kubenswrapper[5039]: I1124 15:27:03.308771 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:27:03 crc kubenswrapper[5039]: E1124 15:27:03.309790 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:27:16 crc kubenswrapper[5039]: I1124 15:27:16.307984 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:27:16 crc kubenswrapper[5039]: E1124 15:27:16.309488 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:27:29 crc kubenswrapper[5039]: I1124 15:27:29.308556 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:27:29 crc kubenswrapper[5039]: E1124 15:27:29.309752 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:27:30 crc kubenswrapper[5039]: I1124 15:27:30.224708 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7nqfh"] Nov 24 15:27:30 crc kubenswrapper[5039]: E1124 15:27:30.225316 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f4aba86-b2f3-467f-90f3-1e88361d2893" containerName="container-00" Nov 24 15:27:30 crc kubenswrapper[5039]: I1124 
15:27:30.225339 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f4aba86-b2f3-467f-90f3-1e88361d2893" containerName="container-00" Nov 24 15:27:30 crc kubenswrapper[5039]: I1124 15:27:30.225667 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f4aba86-b2f3-467f-90f3-1e88361d2893" containerName="container-00" Nov 24 15:27:30 crc kubenswrapper[5039]: I1124 15:27:30.228099 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7nqfh" Nov 24 15:27:30 crc kubenswrapper[5039]: I1124 15:27:30.247185 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7nqfh"] Nov 24 15:27:30 crc kubenswrapper[5039]: I1124 15:27:30.299745 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c28f177-f335-43ff-aa8d-0ddf05759fba-catalog-content\") pod \"redhat-operators-7nqfh\" (UID: \"0c28f177-f335-43ff-aa8d-0ddf05759fba\") " pod="openshift-marketplace/redhat-operators-7nqfh" Nov 24 15:27:30 crc kubenswrapper[5039]: I1124 15:27:30.299831 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5w6pw\" (UniqueName: \"kubernetes.io/projected/0c28f177-f335-43ff-aa8d-0ddf05759fba-kube-api-access-5w6pw\") pod \"redhat-operators-7nqfh\" (UID: \"0c28f177-f335-43ff-aa8d-0ddf05759fba\") " pod="openshift-marketplace/redhat-operators-7nqfh" Nov 24 15:27:30 crc kubenswrapper[5039]: I1124 15:27:30.299886 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c28f177-f335-43ff-aa8d-0ddf05759fba-utilities\") pod \"redhat-operators-7nqfh\" (UID: \"0c28f177-f335-43ff-aa8d-0ddf05759fba\") " pod="openshift-marketplace/redhat-operators-7nqfh" Nov 24 15:27:30 crc kubenswrapper[5039]: I1124 15:27:30.401875 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c28f177-f335-43ff-aa8d-0ddf05759fba-utilities\") pod \"redhat-operators-7nqfh\" (UID: \"0c28f177-f335-43ff-aa8d-0ddf05759fba\") " pod="openshift-marketplace/redhat-operators-7nqfh" Nov 24 15:27:30 crc kubenswrapper[5039]: I1124 15:27:30.402392 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c28f177-f335-43ff-aa8d-0ddf05759fba-utilities\") pod \"redhat-operators-7nqfh\" (UID: \"0c28f177-f335-43ff-aa8d-0ddf05759fba\") " pod="openshift-marketplace/redhat-operators-7nqfh" Nov 24 15:27:30 crc kubenswrapper[5039]: I1124 15:27:30.402636 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c28f177-f335-43ff-aa8d-0ddf05759fba-catalog-content\") pod \"redhat-operators-7nqfh\" (UID: \"0c28f177-f335-43ff-aa8d-0ddf05759fba\") " pod="openshift-marketplace/redhat-operators-7nqfh" Nov 24 15:27:30 crc kubenswrapper[5039]: I1124 15:27:30.402807 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5w6pw\" (UniqueName: \"kubernetes.io/projected/0c28f177-f335-43ff-aa8d-0ddf05759fba-kube-api-access-5w6pw\") pod \"redhat-operators-7nqfh\" (UID: \"0c28f177-f335-43ff-aa8d-0ddf05759fba\") " pod="openshift-marketplace/redhat-operators-7nqfh" Nov 24 15:27:30 crc kubenswrapper[5039]: I1124 15:27:30.403103 5039 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c28f177-f335-43ff-aa8d-0ddf05759fba-catalog-content\") pod \"redhat-operators-7nqfh\" (UID: \"0c28f177-f335-43ff-aa8d-0ddf05759fba\") " pod="openshift-marketplace/redhat-operators-7nqfh" Nov 24 15:27:30 crc kubenswrapper[5039]: I1124 15:27:30.422371 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5w6pw\" (UniqueName: \"kubernetes.io/projected/0c28f177-f335-43ff-aa8d-0ddf05759fba-kube-api-access-5w6pw\") pod \"redhat-operators-7nqfh\" (UID: \"0c28f177-f335-43ff-aa8d-0ddf05759fba\") " pod="openshift-marketplace/redhat-operators-7nqfh" Nov 24 15:27:30 crc kubenswrapper[5039]: I1124 15:27:30.553158 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7nqfh" Nov 24 15:27:31 crc kubenswrapper[5039]: I1124 15:27:31.060010 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7nqfh"] Nov 24 15:27:31 crc kubenswrapper[5039]: W1124 15:27:31.067737 5039 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0c28f177_f335_43ff_aa8d_0ddf05759fba.slice/crio-88112ad69066435f6ba185378809e2503b5caf9751a0c7469d808dc672bcb1e2 WatchSource:0}: Error finding container 88112ad69066435f6ba185378809e2503b5caf9751a0c7469d808dc672bcb1e2: Status 404 returned error can't find the container with id 88112ad69066435f6ba185378809e2503b5caf9751a0c7469d808dc672bcb1e2 Nov 24 15:27:31 crc kubenswrapper[5039]: I1124 15:27:31.570271 5039 generic.go:334] "Generic (PLEG): container finished" podID="0c28f177-f335-43ff-aa8d-0ddf05759fba" containerID="d9061f86d7aadd1997f10ea82854ae81e4977bc5db93651cdffc2eade8a5def1" exitCode=0 Nov 24 15:27:31 crc kubenswrapper[5039]: I1124 15:27:31.570373 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7nqfh" event={"ID":"0c28f177-f335-43ff-aa8d-0ddf05759fba","Type":"ContainerDied","Data":"d9061f86d7aadd1997f10ea82854ae81e4977bc5db93651cdffc2eade8a5def1"} Nov 24 15:27:31 crc kubenswrapper[5039]: I1124 15:27:31.570403 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7nqfh" event={"ID":"0c28f177-f335-43ff-aa8d-0ddf05759fba","Type":"ContainerStarted","Data":"88112ad69066435f6ba185378809e2503b5caf9751a0c7469d808dc672bcb1e2"} Nov 24 15:27:33 crc kubenswrapper[5039]: I1124 15:27:33.604216 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7nqfh" event={"ID":"0c28f177-f335-43ff-aa8d-0ddf05759fba","Type":"ContainerStarted","Data":"9cf7ab40434bc5be82326506485d880e26cc990ca32b8e3265f0cd1c5c8348c2"} Nov 24 15:27:37 crc kubenswrapper[5039]: I1124 15:27:37.665789 5039 generic.go:334] "Generic (PLEG): container finished" podID="0c28f177-f335-43ff-aa8d-0ddf05759fba" containerID="9cf7ab40434bc5be82326506485d880e26cc990ca32b8e3265f0cd1c5c8348c2" exitCode=0 Nov 24 15:27:37 crc kubenswrapper[5039]: I1124 15:27:37.665861 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7nqfh" event={"ID":"0c28f177-f335-43ff-aa8d-0ddf05759fba","Type":"ContainerDied","Data":"9cf7ab40434bc5be82326506485d880e26cc990ca32b8e3265f0cd1c5c8348c2"} Nov 24 15:27:38 crc kubenswrapper[5039]: I1124 15:27:38.681764 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-7nqfh" event={"ID":"0c28f177-f335-43ff-aa8d-0ddf05759fba","Type":"ContainerStarted","Data":"132315a29728a118d430f72e8a757e634ccdcbe2749c18321813908b858e1868"} Nov 24 15:27:38 crc kubenswrapper[5039]: I1124 15:27:38.711534 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7nqfh" podStartSLOduration=2.222689967 podStartE2EDuration="8.71149189s" podCreationTimestamp="2025-11-24 15:27:30 +0000 UTC" firstStartedPulling="2025-11-24 15:27:31.580221098 +0000 UTC m=+7764.019345588" lastFinishedPulling="2025-11-24 15:27:38.069023011 +0000 UTC m=+7770.508147511" observedRunningTime="2025-11-24 15:27:38.698917903 +0000 UTC m=+7771.138042403" watchObservedRunningTime="2025-11-24 15:27:38.71149189 +0000 UTC m=+7771.150616390" Nov 24 15:27:40 crc kubenswrapper[5039]: I1124 15:27:40.554703 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7nqfh" Nov 24 15:27:40 crc kubenswrapper[5039]: I1124 15:27:40.555157 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7nqfh" Nov 24 15:27:41 crc kubenswrapper[5039]: I1124 15:27:41.615020 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7nqfh" podUID="0c28f177-f335-43ff-aa8d-0ddf05759fba" containerName="registry-server" probeResult="failure" output=< Nov 24 15:27:41 crc kubenswrapper[5039]: timeout: failed to connect service ":50051" within 1s Nov 24 15:27:41 crc kubenswrapper[5039]: > Nov 24 15:27:42 crc kubenswrapper[5039]: I1124 15:27:42.309365 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:27:42 crc kubenswrapper[5039]: E1124 15:27:42.309831 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:27:46 crc kubenswrapper[5039]: I1124 15:27:46.090279 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_0201ebd4-dc90-4332-b036-38d4d2a1ea2a/aodh-api/0.log" Nov 24 15:27:46 crc kubenswrapper[5039]: I1124 15:27:46.150863 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_0201ebd4-dc90-4332-b036-38d4d2a1ea2a/aodh-evaluator/0.log" Nov 24 15:27:46 crc kubenswrapper[5039]: I1124 15:27:46.272418 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_0201ebd4-dc90-4332-b036-38d4d2a1ea2a/aodh-listener/0.log" Nov 24 15:27:46 crc kubenswrapper[5039]: I1124 15:27:46.306301 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_0201ebd4-dc90-4332-b036-38d4d2a1ea2a/aodh-notifier/0.log" Nov 24 15:27:46 crc kubenswrapper[5039]: I1124 15:27:46.379167 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7c65477b5b-lzp7p_910059fe-375d-443a-8dce-3dd9d0ea7bce/barbican-api/0.log" Nov 24 15:27:46 crc kubenswrapper[5039]: I1124 15:27:46.514326 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7c65477b5b-lzp7p_910059fe-375d-443a-8dce-3dd9d0ea7bce/barbican-api-log/0.log" Nov 
24 15:27:46 crc kubenswrapper[5039]: I1124 15:27:46.571842 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-746f5fd69d-mww4x_df3df8f8-f89f-4eab-98af-d7dd6cfe17da/barbican-keystone-listener/0.log" Nov 24 15:27:46 crc kubenswrapper[5039]: I1124 15:27:46.747246 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-746f5fd69d-mww4x_df3df8f8-f89f-4eab-98af-d7dd6cfe17da/barbican-keystone-listener-log/0.log" Nov 24 15:27:46 crc kubenswrapper[5039]: I1124 15:27:46.756733 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7f697665cf-n6vcs_1228dfc2-bfeb-4ba9-b0f8-ac276a2207be/barbican-worker/0.log" Nov 24 15:27:46 crc kubenswrapper[5039]: I1124 15:27:46.865568 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7f697665cf-n6vcs_1228dfc2-bfeb-4ba9-b0f8-ac276a2207be/barbican-worker-log/0.log" Nov 24 15:27:46 crc kubenswrapper[5039]: I1124 15:27:46.985293 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-87gkp_1b1f6884-b4f4-4657-a039-930296794fbe/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:27:47 crc kubenswrapper[5039]: I1124 15:27:47.159174 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_066b84eb-20a0-4d2a-b970-6a4419ac3dcc/ceilometer-central-agent/0.log" Nov 24 15:27:47 crc kubenswrapper[5039]: I1124 15:27:47.251774 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_066b84eb-20a0-4d2a-b970-6a4419ac3dcc/ceilometer-notification-agent/0.log" Nov 24 15:27:47 crc kubenswrapper[5039]: I1124 15:27:47.259363 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_066b84eb-20a0-4d2a-b970-6a4419ac3dcc/proxy-httpd/0.log" Nov 24 15:27:47 crc kubenswrapper[5039]: I1124 15:27:47.334901 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_066b84eb-20a0-4d2a-b970-6a4419ac3dcc/sg-core/0.log" Nov 24 15:27:47 crc kubenswrapper[5039]: I1124 15:27:47.445863 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-client-edpm-deployment-openstack-edpm-ipam-m6g2g_c4761a4e-a177-4629-812b-8f940a7c5b98/ceph-client-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:27:47 crc kubenswrapper[5039]: I1124 15:27:47.524917 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-jjcw8_a735f69f-6248-4a8a-aeed-cfb50b81c9cb/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:27:47 crc kubenswrapper[5039]: I1124 15:27:47.745385 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_d1847d7c-086b-4615-81d8-a6c5e915dcb4/cinder-api-log/0.log" Nov 24 15:27:47 crc kubenswrapper[5039]: I1124 15:27:47.774633 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_d1847d7c-086b-4615-81d8-a6c5e915dcb4/cinder-api/0.log" Nov 24 15:27:48 crc kubenswrapper[5039]: I1124 15:27:48.002729 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_0ed5042d-f435-4adf-aa2b-6c1949957f4c/probe/0.log" Nov 24 15:27:48 crc kubenswrapper[5039]: I1124 15:27:48.142615 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_d1395fb6-6223-4aea-9a6d-e743cecd804e/cinder-scheduler/0.log" Nov 24 15:27:48 crc kubenswrapper[5039]: I1124 
15:27:48.241325 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_0ed5042d-f435-4adf-aa2b-6c1949957f4c/cinder-backup/0.log" Nov 24 15:27:48 crc kubenswrapper[5039]: I1124 15:27:48.350646 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_d1395fb6-6223-4aea-9a6d-e743cecd804e/probe/0.log" Nov 24 15:27:48 crc kubenswrapper[5039]: I1124 15:27:48.492623 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_d82cb8c7-3a11-43f9-94a7-63e8a4b824d4/cinder-volume/0.log" Nov 24 15:27:48 crc kubenswrapper[5039]: I1124 15:27:48.527261 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_d82cb8c7-3a11-43f9-94a7-63e8a4b824d4/probe/0.log" Nov 24 15:27:48 crc kubenswrapper[5039]: I1124 15:27:48.606231 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-8vsgl_3ffbbfde-6e25-49e1-ab24-061d1e90c133/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:27:48 crc kubenswrapper[5039]: I1124 15:27:48.749832 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-zbxr5_d6370bcf-3557-4e56-9c7b-670a2ec77ec0/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:27:48 crc kubenswrapper[5039]: I1124 15:27:48.868611 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-74cfff99f-ptjrg_f0ced711-f251-4bc4-b59c-4955f950f20d/init/0.log" Nov 24 15:27:49 crc kubenswrapper[5039]: I1124 15:27:49.031745 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-74cfff99f-ptjrg_f0ced711-f251-4bc4-b59c-4955f950f20d/init/0.log" Nov 24 15:27:49 crc kubenswrapper[5039]: I1124 15:27:49.055119 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_8609b6fd-f97e-4af8-811f-c86e99bf033a/glance-httpd/0.log" Nov 24 15:27:49 crc kubenswrapper[5039]: I1124 15:27:49.133148 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-74cfff99f-ptjrg_f0ced711-f251-4bc4-b59c-4955f950f20d/dnsmasq-dns/0.log" Nov 24 15:27:49 crc kubenswrapper[5039]: I1124 15:27:49.312979 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_6f1ea0e7-3b9c-4fed-85cc-901484aed56f/glance-log/0.log" Nov 24 15:27:49 crc kubenswrapper[5039]: I1124 15:27:49.314688 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_8609b6fd-f97e-4af8-811f-c86e99bf033a/glance-log/0.log" Nov 24 15:27:49 crc kubenswrapper[5039]: I1124 15:27:49.339826 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_6f1ea0e7-3b9c-4fed-85cc-901484aed56f/glance-httpd/0.log" Nov 24 15:27:50 crc kubenswrapper[5039]: I1124 15:27:50.050643 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-b4696fd89-fd5qp_71066830-9639-4b66-b1c2-cbbc8eb2a821/heat-engine/0.log" Nov 24 15:27:50 crc kubenswrapper[5039]: I1124 15:27:50.301934 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-6fbc854bcb-ssv8l_4a0e58d4-73eb-4baf-8698-4c67b711e1a8/horizon/0.log" Nov 24 15:27:50 crc kubenswrapper[5039]: I1124 15:27:50.566013 5039 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-qkn5n_0270ae43-26ba-4706-827e-c008cf7ca4fa/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:27:50 crc kubenswrapper[5039]: I1124 15:27:50.768070 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-w6l8n_6ae099f1-378b-4de8-a8aa-480a714ccbaf/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:27:50 crc kubenswrapper[5039]: I1124 15:27:50.830794 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-6b64fd586c-rsg7v_8a04d457-423e-463d-8ea9-35d085150af5/heat-cfnapi/0.log" Nov 24 15:27:50 crc kubenswrapper[5039]: I1124 15:27:50.879233 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-7b9d6d4567-h9q74_daa63fbd-a80c-4690-b49c-e402cb6b3c69/heat-api/0.log" Nov 24 15:27:50 crc kubenswrapper[5039]: I1124 15:27:50.942546 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-6fbc854bcb-ssv8l_4a0e58d4-73eb-4baf-8698-4c67b711e1a8/horizon-log/0.log" Nov 24 15:27:51 crc kubenswrapper[5039]: I1124 15:27:51.081331 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29399881-khmls_5589d33f-8cad-4a38-ae7d-f9611bb8efc5/keystone-cron/0.log" Nov 24 15:27:51 crc kubenswrapper[5039]: I1124 15:27:51.196177 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29399941-srhkf_e88464ce-c201-4ce0-831a-bad31b599341/keystone-cron/0.log" Nov 24 15:27:51 crc kubenswrapper[5039]: I1124 15:27:51.330611 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_07adf2a8-6758-4e5e-b757-6d32eebb1f93/kube-state-metrics/0.log" Nov 24 15:27:51 crc kubenswrapper[5039]: I1124 15:27:51.469801 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-5s78f_09487809-1d9c-44f7-81e0-91d56354f51c/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:27:51 crc kubenswrapper[5039]: I1124 15:27:51.528742 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-576959578d-mb556_a0a57e07-3e25-4329-9789-c3ff435860c3/keystone-api/0.log" Nov 24 15:27:51 crc kubenswrapper[5039]: I1124 15:27:51.580037 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_logging-edpm-deployment-openstack-edpm-ipam-tpltt_bed151c2-ef33-4571-b779-761a70733f9d/logging-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:27:51 crc kubenswrapper[5039]: I1124 15:27:51.598975 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7nqfh" podUID="0c28f177-f335-43ff-aa8d-0ddf05759fba" containerName="registry-server" probeResult="failure" output=< Nov 24 15:27:51 crc kubenswrapper[5039]: timeout: failed to connect service ":50051" within 1s Nov 24 15:27:51 crc kubenswrapper[5039]: > Nov 24 15:27:51 crc kubenswrapper[5039]: I1124 15:27:51.736084 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_92bc16f0-cdd7-4437-aa94-57bf0cd83126/manila-api-log/0.log" Nov 24 15:27:51 crc kubenswrapper[5039]: I1124 15:27:51.892098 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_92bc16f0-cdd7-4437-aa94-57bf0cd83126/manila-api/0.log" Nov 24 15:27:51 crc kubenswrapper[5039]: I1124 15:27:51.979417 5039 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_manila-scheduler-0_8f957236-16e6-45c4-8174-b20f69df4ecb/probe/0.log" Nov 24 15:27:51 crc kubenswrapper[5039]: I1124 15:27:51.982292 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_8f957236-16e6-45c4-8174-b20f69df4ecb/manila-scheduler/0.log" Nov 24 15:27:52 crc kubenswrapper[5039]: I1124 15:27:52.091857 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_48847c7a-e55f-4a84-8448-89447c762f34/probe/0.log" Nov 24 15:27:52 crc kubenswrapper[5039]: I1124 15:27:52.113947 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_48847c7a-e55f-4a84-8448-89447c762f34/manila-share/0.log" Nov 24 15:27:52 crc kubenswrapper[5039]: I1124 15:27:52.334429 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mysqld-exporter-0_52ce1afd-e5d8-401a-8fb1-e02b6aff131b/mysqld-exporter/0.log" Nov 24 15:27:52 crc kubenswrapper[5039]: I1124 15:27:52.701538 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-sktf7_bb9ef170-1b1b-4027-9ac0-b0e67efda529/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:27:52 crc kubenswrapper[5039]: I1124 15:27:52.715119 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7bdd5bd5df-sqgnq_05e189da-8176-4c22-9069-51d7e5f8b867/neutron-api/0.log" Nov 24 15:27:52 crc kubenswrapper[5039]: I1124 15:27:52.736747 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7bdd5bd5df-sqgnq_05e189da-8176-4c22-9069-51d7e5f8b867/neutron-httpd/0.log" Nov 24 15:27:53 crc kubenswrapper[5039]: I1124 15:27:53.498403 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_62ba09af-0d54-45af-8bed-9c8a1a3661f2/nova-cell0-conductor-conductor/0.log" Nov 24 15:27:53 crc kubenswrapper[5039]: I1124 15:27:53.708933 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_42be8bb1-8823-4a1f-8777-348baedb7758/nova-api-log/0.log" Nov 24 15:27:53 crc kubenswrapper[5039]: I1124 15:27:53.799286 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_5894e63c-79c2-42a0-bc65-95f1a69a1525/nova-cell1-conductor-conductor/0.log" Nov 24 15:27:54 crc kubenswrapper[5039]: I1124 15:27:54.101939 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-dxsjc_49f0a456-4039-4471-9dd2-c17ea42981e3/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:27:54 crc kubenswrapper[5039]: I1124 15:27:54.162605 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_8bcc04e6-8265-45a0-9883-cf6831c72a9c/nova-cell1-novncproxy-novncproxy/0.log" Nov 24 15:27:54 crc kubenswrapper[5039]: I1124 15:27:54.307677 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:27:54 crc kubenswrapper[5039]: E1124 15:27:54.320835 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" 
podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:27:54 crc kubenswrapper[5039]: I1124 15:27:54.515887 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_1c245130-8f33-4226-b312-9573746acd0f/nova-metadata-log/0.log" Nov 24 15:27:54 crc kubenswrapper[5039]: I1124 15:27:54.566343 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_42be8bb1-8823-4a1f-8777-348baedb7758/nova-api-api/0.log" Nov 24 15:27:54 crc kubenswrapper[5039]: I1124 15:27:54.823199 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_1dcf47d4-1399-46bb-bda8-5dfeb96a3b60/mysql-bootstrap/0.log" Nov 24 15:27:55 crc kubenswrapper[5039]: I1124 15:27:55.007974 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_1dcf47d4-1399-46bb-bda8-5dfeb96a3b60/mysql-bootstrap/0.log" Nov 24 15:27:55 crc kubenswrapper[5039]: I1124 15:27:55.017370 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_04160090-3eab-412c-a6e0-6946a44bcb81/nova-scheduler-scheduler/0.log" Nov 24 15:27:55 crc kubenswrapper[5039]: I1124 15:27:55.123497 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_1dcf47d4-1399-46bb-bda8-5dfeb96a3b60/galera/0.log" Nov 24 15:27:55 crc kubenswrapper[5039]: I1124 15:27:55.193993 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_c3dc205b-caf2-45c8-8110-d0f8be91e10f/mysql-bootstrap/0.log" Nov 24 15:27:55 crc kubenswrapper[5039]: I1124 15:27:55.491441 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_c3dc205b-caf2-45c8-8110-d0f8be91e10f/galera/0.log" Nov 24 15:27:55 crc kubenswrapper[5039]: I1124 15:27:55.515221 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_c3dc205b-caf2-45c8-8110-d0f8be91e10f/mysql-bootstrap/0.log" Nov 24 15:27:55 crc kubenswrapper[5039]: I1124 15:27:55.667782 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_bbd0fae4-aa15-46d2-8118-f738c3c1dc3c/openstackclient/0.log" Nov 24 15:27:55 crc kubenswrapper[5039]: I1124 15:27:55.844995 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-5dqj9_4fc86906-5a7c-4bfe-8d23-1c98a8711a4a/ovn-controller/0.log" Nov 24 15:27:56 crc kubenswrapper[5039]: I1124 15:27:56.007797 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-wb4sk_6e986f2a-8ff1-4efb-aff0-7e294c0845bf/openstack-network-exporter/0.log" Nov 24 15:27:56 crc kubenswrapper[5039]: I1124 15:27:56.178467 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2cfx8_cfe8f618-f843-4051-9491-cb3d06e1a1bc/ovsdb-server-init/0.log" Nov 24 15:27:56 crc kubenswrapper[5039]: I1124 15:27:56.415612 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2cfx8_cfe8f618-f843-4051-9491-cb3d06e1a1bc/ovsdb-server/0.log" Nov 24 15:27:56 crc kubenswrapper[5039]: I1124 15:27:56.425276 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2cfx8_cfe8f618-f843-4051-9491-cb3d06e1a1bc/ovs-vswitchd/0.log" Nov 24 15:27:56 crc kubenswrapper[5039]: I1124 15:27:56.460544 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2cfx8_cfe8f618-f843-4051-9491-cb3d06e1a1bc/ovsdb-server-init/0.log" Nov 24 15:27:56 
crc kubenswrapper[5039]: I1124 15:27:56.735313 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-h8rn4_a1fa909b-2535-405a-9969-fc0ca9ff77fc/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:27:56 crc kubenswrapper[5039]: I1124 15:27:56.895579 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_9523b0b0-e489-4eb8-8954-83dd766373df/openstack-network-exporter/0.log" Nov 24 15:27:56 crc kubenswrapper[5039]: I1124 15:27:56.949380 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_9523b0b0-e489-4eb8-8954-83dd766373df/ovn-northd/0.log" Nov 24 15:27:57 crc kubenswrapper[5039]: I1124 15:27:57.108580 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9/openstack-network-exporter/0.log" Nov 24 15:27:57 crc kubenswrapper[5039]: I1124 15:27:57.212877 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_59b3b4cd-dfa0-4d92-a57b-8be015d0bdd9/ovsdbserver-nb/0.log" Nov 24 15:27:57 crc kubenswrapper[5039]: I1124 15:27:57.357691 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_a5a69761-eccd-49e6-8749-86142600d287/openstack-network-exporter/0.log" Nov 24 15:27:57 crc kubenswrapper[5039]: I1124 15:27:57.463218 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_a5a69761-eccd-49e6-8749-86142600d287/ovsdbserver-sb/0.log" Nov 24 15:27:57 crc kubenswrapper[5039]: I1124 15:27:57.577406 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_1c245130-8f33-4226-b312-9573746acd0f/nova-metadata-metadata/0.log" Nov 24 15:27:57 crc kubenswrapper[5039]: I1124 15:27:57.761197 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-66cb4657dd-z97bx_7cd31c1b-3250-444a-a717-88349d2c57a0/placement-api/0.log" Nov 24 15:27:57 crc kubenswrapper[5039]: I1124 15:27:57.789784 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_54819035-007f-4162-9419-d825f50e1ce9/init-config-reloader/0.log" Nov 24 15:27:57 crc kubenswrapper[5039]: I1124 15:27:57.901168 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-66cb4657dd-z97bx_7cd31c1b-3250-444a-a717-88349d2c57a0/placement-log/0.log" Nov 24 15:27:57 crc kubenswrapper[5039]: I1124 15:27:57.993939 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_54819035-007f-4162-9419-d825f50e1ce9/init-config-reloader/0.log" Nov 24 15:27:58 crc kubenswrapper[5039]: I1124 15:27:58.028249 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_54819035-007f-4162-9419-d825f50e1ce9/config-reloader/0.log" Nov 24 15:27:58 crc kubenswrapper[5039]: I1124 15:27:58.121906 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_54819035-007f-4162-9419-d825f50e1ce9/prometheus/0.log" Nov 24 15:27:58 crc kubenswrapper[5039]: I1124 15:27:58.140077 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_54819035-007f-4162-9419-d825f50e1ce9/thanos-sidecar/0.log" Nov 24 15:27:58 crc kubenswrapper[5039]: I1124 15:27:58.279284 5039 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c2b248b0-d5b6-4800-9f0a-915f03d73696/setup-container/0.log" Nov 24 15:27:58 crc kubenswrapper[5039]: I1124 15:27:58.519101 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c2b248b0-d5b6-4800-9f0a-915f03d73696/setup-container/0.log" Nov 24 15:27:58 crc kubenswrapper[5039]: I1124 15:27:58.529095 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_b820e90e-779c-4300-b0e0-affe5118e73f/setup-container/0.log" Nov 24 15:27:58 crc kubenswrapper[5039]: I1124 15:27:58.604028 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c2b248b0-d5b6-4800-9f0a-915f03d73696/rabbitmq/0.log" Nov 24 15:27:58 crc kubenswrapper[5039]: I1124 15:27:58.728824 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_b820e90e-779c-4300-b0e0-affe5118e73f/setup-container/0.log" Nov 24 15:27:58 crc kubenswrapper[5039]: I1124 15:27:58.804578 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_b820e90e-779c-4300-b0e0-affe5118e73f/rabbitmq/0.log" Nov 24 15:27:58 crc kubenswrapper[5039]: I1124 15:27:58.844222 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-hvq4m_76784bc3-6a6c-4ecc-8799-daffb50a9ca3/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:27:59 crc kubenswrapper[5039]: I1124 15:27:59.042933 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-hjnhj_c2bf2f79-d7fa-47ca-a8fc-b48d77875208/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:27:59 crc kubenswrapper[5039]: I1124 15:27:59.070340 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-hnwls_7ef0c61d-e6e4-49d2-949c-ed412b59186f/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:27:59 crc kubenswrapper[5039]: I1124 15:27:59.214733 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-bqd9r_20f1c3b8-744c-45ab-b82c-07fab5659614/ssh-known-hosts-edpm-deployment/0.log" Nov 24 15:27:59 crc kubenswrapper[5039]: I1124 15:27:59.503264 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-5b66587b55-thzjl_bd1bf6a5-309b-4960-8f37-34b006db3599/proxy-server/0.log" Nov 24 15:27:59 crc kubenswrapper[5039]: I1124 15:27:59.574589 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-vnskv_d7a6efff-c0ad-43c3-999c-d4840d3c5825/swift-ring-rebalance/0.log" Nov 24 15:27:59 crc kubenswrapper[5039]: I1124 15:27:59.737256 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/account-auditor/0.log" Nov 24 15:27:59 crc kubenswrapper[5039]: I1124 15:27:59.741900 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-5b66587b55-thzjl_bd1bf6a5-309b-4960-8f37-34b006db3599/proxy-httpd/0.log" Nov 24 15:27:59 crc kubenswrapper[5039]: I1124 15:27:59.838097 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/account-reaper/0.log" Nov 24 15:28:00 crc kubenswrapper[5039]: I1124 15:28:00.007841 5039 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/account-server/0.log" Nov 24 15:28:00 crc kubenswrapper[5039]: I1124 15:28:00.059318 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/account-replicator/0.log" Nov 24 15:28:00 crc kubenswrapper[5039]: I1124 15:28:00.119545 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/container-auditor/0.log" Nov 24 15:28:00 crc kubenswrapper[5039]: I1124 15:28:00.128073 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/container-replicator/0.log" Nov 24 15:28:00 crc kubenswrapper[5039]: I1124 15:28:00.271888 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/container-server/0.log" Nov 24 15:28:00 crc kubenswrapper[5039]: I1124 15:28:00.332258 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/container-updater/0.log" Nov 24 15:28:00 crc kubenswrapper[5039]: I1124 15:28:00.362379 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/object-expirer/0.log" Nov 24 15:28:00 crc kubenswrapper[5039]: I1124 15:28:00.387987 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/object-auditor/0.log" Nov 24 15:28:00 crc kubenswrapper[5039]: I1124 15:28:00.530601 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/object-server/0.log" Nov 24 15:28:00 crc kubenswrapper[5039]: I1124 15:28:00.551656 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/object-replicator/0.log" Nov 24 15:28:00 crc kubenswrapper[5039]: I1124 15:28:00.576573 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/object-updater/0.log" Nov 24 15:28:00 crc kubenswrapper[5039]: I1124 15:28:00.604360 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7nqfh" Nov 24 15:28:00 crc kubenswrapper[5039]: I1124 15:28:00.634269 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/rsync/0.log" Nov 24 15:28:00 crc kubenswrapper[5039]: I1124 15:28:00.692298 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7nqfh" Nov 24 15:28:00 crc kubenswrapper[5039]: I1124 15:28:00.784255 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ac0d32e6-ff0b-4d8e-9094-b0edcc49cc8f/swift-recon-cron/0.log" Nov 24 15:28:00 crc kubenswrapper[5039]: I1124 15:28:00.818642 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-fg2p2_d1d48eba-5a90-4ca3-b298-f19175f93608/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:28:01 crc kubenswrapper[5039]: I1124 15:28:01.011815 5039 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_telemetry-power-monitoring-edpm-deployment-openstack-edpm-5jnt5_d8589776-bb1f-42ea-8bfa-7053520c66b7/telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:28:01 crc kubenswrapper[5039]: I1124 15:28:01.213180 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_2c05319a-d5d8-4585-8d73-0bc049535803/test-operator-logs-container/0.log" Nov 24 15:28:01 crc kubenswrapper[5039]: I1124 15:28:01.419693 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7nqfh"] Nov 24 15:28:01 crc kubenswrapper[5039]: I1124 15:28:01.477390 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-jdlm7_f9911acb-6e34-497c-9346-18b3299f63be/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 15:28:01 crc kubenswrapper[5039]: I1124 15:28:01.905330 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7nqfh" podUID="0c28f177-f335-43ff-aa8d-0ddf05759fba" containerName="registry-server" containerID="cri-o://132315a29728a118d430f72e8a757e634ccdcbe2749c18321813908b858e1868" gracePeriod=2 Nov 24 15:28:02 crc kubenswrapper[5039]: I1124 15:28:02.181945 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_e515a4f0-d838-4d61-906b-f26a0c07f8c8/tempest-tests-tempest-tests-runner/0.log" Nov 24 15:28:02 crc kubenswrapper[5039]: I1124 15:28:02.490054 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7nqfh" Nov 24 15:28:02 crc kubenswrapper[5039]: I1124 15:28:02.663969 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c28f177-f335-43ff-aa8d-0ddf05759fba-catalog-content\") pod \"0c28f177-f335-43ff-aa8d-0ddf05759fba\" (UID: \"0c28f177-f335-43ff-aa8d-0ddf05759fba\") " Nov 24 15:28:02 crc kubenswrapper[5039]: I1124 15:28:02.664034 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5w6pw\" (UniqueName: \"kubernetes.io/projected/0c28f177-f335-43ff-aa8d-0ddf05759fba-kube-api-access-5w6pw\") pod \"0c28f177-f335-43ff-aa8d-0ddf05759fba\" (UID: \"0c28f177-f335-43ff-aa8d-0ddf05759fba\") " Nov 24 15:28:02 crc kubenswrapper[5039]: I1124 15:28:02.664274 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c28f177-f335-43ff-aa8d-0ddf05759fba-utilities\") pod \"0c28f177-f335-43ff-aa8d-0ddf05759fba\" (UID: \"0c28f177-f335-43ff-aa8d-0ddf05759fba\") " Nov 24 15:28:02 crc kubenswrapper[5039]: I1124 15:28:02.664815 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c28f177-f335-43ff-aa8d-0ddf05759fba-utilities" (OuterVolumeSpecName: "utilities") pod "0c28f177-f335-43ff-aa8d-0ddf05759fba" (UID: "0c28f177-f335-43ff-aa8d-0ddf05759fba"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 15:28:02 crc kubenswrapper[5039]: I1124 15:28:02.671704 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c28f177-f335-43ff-aa8d-0ddf05759fba-kube-api-access-5w6pw" (OuterVolumeSpecName: "kube-api-access-5w6pw") pod "0c28f177-f335-43ff-aa8d-0ddf05759fba" (UID: "0c28f177-f335-43ff-aa8d-0ddf05759fba"). InnerVolumeSpecName "kube-api-access-5w6pw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 15:28:02 crc kubenswrapper[5039]: I1124 15:28:02.767111 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5w6pw\" (UniqueName: \"kubernetes.io/projected/0c28f177-f335-43ff-aa8d-0ddf05759fba-kube-api-access-5w6pw\") on node \"crc\" DevicePath \"\"" Nov 24 15:28:02 crc kubenswrapper[5039]: I1124 15:28:02.767385 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c28f177-f335-43ff-aa8d-0ddf05759fba-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 15:28:02 crc kubenswrapper[5039]: I1124 15:28:02.770629 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c28f177-f335-43ff-aa8d-0ddf05759fba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0c28f177-f335-43ff-aa8d-0ddf05759fba" (UID: "0c28f177-f335-43ff-aa8d-0ddf05759fba"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 15:28:02 crc kubenswrapper[5039]: I1124 15:28:02.869080 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c28f177-f335-43ff-aa8d-0ddf05759fba-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 15:28:02 crc kubenswrapper[5039]: I1124 15:28:02.920719 5039 generic.go:334] "Generic (PLEG): container finished" podID="0c28f177-f335-43ff-aa8d-0ddf05759fba" containerID="132315a29728a118d430f72e8a757e634ccdcbe2749c18321813908b858e1868" exitCode=0 Nov 24 15:28:02 crc kubenswrapper[5039]: I1124 15:28:02.920770 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7nqfh" event={"ID":"0c28f177-f335-43ff-aa8d-0ddf05759fba","Type":"ContainerDied","Data":"132315a29728a118d430f72e8a757e634ccdcbe2749c18321813908b858e1868"} Nov 24 15:28:02 crc kubenswrapper[5039]: I1124 15:28:02.920812 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7nqfh" event={"ID":"0c28f177-f335-43ff-aa8d-0ddf05759fba","Type":"ContainerDied","Data":"88112ad69066435f6ba185378809e2503b5caf9751a0c7469d808dc672bcb1e2"} Nov 24 15:28:02 crc kubenswrapper[5039]: I1124 15:28:02.920822 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7nqfh" Nov 24 15:28:02 crc kubenswrapper[5039]: I1124 15:28:02.920835 5039 scope.go:117] "RemoveContainer" containerID="132315a29728a118d430f72e8a757e634ccdcbe2749c18321813908b858e1868" Nov 24 15:28:02 crc kubenswrapper[5039]: I1124 15:28:02.942572 5039 scope.go:117] "RemoveContainer" containerID="9cf7ab40434bc5be82326506485d880e26cc990ca32b8e3265f0cd1c5c8348c2" Nov 24 15:28:02 crc kubenswrapper[5039]: I1124 15:28:02.969466 5039 scope.go:117] "RemoveContainer" containerID="d9061f86d7aadd1997f10ea82854ae81e4977bc5db93651cdffc2eade8a5def1" Nov 24 15:28:02 crc kubenswrapper[5039]: I1124 15:28:02.974277 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7nqfh"] Nov 24 15:28:02 crc kubenswrapper[5039]: I1124 15:28:02.984527 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7nqfh"] Nov 24 15:28:03 crc kubenswrapper[5039]: I1124 15:28:03.017890 5039 scope.go:117] "RemoveContainer" containerID="132315a29728a118d430f72e8a757e634ccdcbe2749c18321813908b858e1868" Nov 24 15:28:03 crc kubenswrapper[5039]: E1124 15:28:03.018309 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"132315a29728a118d430f72e8a757e634ccdcbe2749c18321813908b858e1868\": container with ID starting with 132315a29728a118d430f72e8a757e634ccdcbe2749c18321813908b858e1868 not found: ID does not exist" containerID="132315a29728a118d430f72e8a757e634ccdcbe2749c18321813908b858e1868" Nov 24 15:28:03 crc kubenswrapper[5039]: I1124 15:28:03.018337 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"132315a29728a118d430f72e8a757e634ccdcbe2749c18321813908b858e1868"} err="failed to get container status \"132315a29728a118d430f72e8a757e634ccdcbe2749c18321813908b858e1868\": rpc error: code = NotFound desc = could not find container \"132315a29728a118d430f72e8a757e634ccdcbe2749c18321813908b858e1868\": container with ID starting with 132315a29728a118d430f72e8a757e634ccdcbe2749c18321813908b858e1868 not found: ID does not exist" Nov 24 15:28:03 crc kubenswrapper[5039]: I1124 15:28:03.018364 5039 scope.go:117] "RemoveContainer" containerID="9cf7ab40434bc5be82326506485d880e26cc990ca32b8e3265f0cd1c5c8348c2" Nov 24 15:28:03 crc kubenswrapper[5039]: E1124 15:28:03.018658 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9cf7ab40434bc5be82326506485d880e26cc990ca32b8e3265f0cd1c5c8348c2\": container with ID starting with 9cf7ab40434bc5be82326506485d880e26cc990ca32b8e3265f0cd1c5c8348c2 not found: ID does not exist" containerID="9cf7ab40434bc5be82326506485d880e26cc990ca32b8e3265f0cd1c5c8348c2" Nov 24 15:28:03 crc kubenswrapper[5039]: I1124 15:28:03.018674 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9cf7ab40434bc5be82326506485d880e26cc990ca32b8e3265f0cd1c5c8348c2"} err="failed to get container status \"9cf7ab40434bc5be82326506485d880e26cc990ca32b8e3265f0cd1c5c8348c2\": rpc error: code = NotFound desc = could not find container \"9cf7ab40434bc5be82326506485d880e26cc990ca32b8e3265f0cd1c5c8348c2\": container with ID starting with 9cf7ab40434bc5be82326506485d880e26cc990ca32b8e3265f0cd1c5c8348c2 not found: ID does not exist" Nov 24 15:28:03 crc kubenswrapper[5039]: I1124 15:28:03.018689 5039 scope.go:117] "RemoveContainer" 
containerID="d9061f86d7aadd1997f10ea82854ae81e4977bc5db93651cdffc2eade8a5def1" Nov 24 15:28:03 crc kubenswrapper[5039]: E1124 15:28:03.018881 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9061f86d7aadd1997f10ea82854ae81e4977bc5db93651cdffc2eade8a5def1\": container with ID starting with d9061f86d7aadd1997f10ea82854ae81e4977bc5db93651cdffc2eade8a5def1 not found: ID does not exist" containerID="d9061f86d7aadd1997f10ea82854ae81e4977bc5db93651cdffc2eade8a5def1" Nov 24 15:28:03 crc kubenswrapper[5039]: I1124 15:28:03.018897 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9061f86d7aadd1997f10ea82854ae81e4977bc5db93651cdffc2eade8a5def1"} err="failed to get container status \"d9061f86d7aadd1997f10ea82854ae81e4977bc5db93651cdffc2eade8a5def1\": rpc error: code = NotFound desc = could not find container \"d9061f86d7aadd1997f10ea82854ae81e4977bc5db93651cdffc2eade8a5def1\": container with ID starting with d9061f86d7aadd1997f10ea82854ae81e4977bc5db93651cdffc2eade8a5def1 not found: ID does not exist" Nov 24 15:28:04 crc kubenswrapper[5039]: I1124 15:28:04.316865 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c28f177-f335-43ff-aa8d-0ddf05759fba" path="/var/lib/kubelet/pods/0c28f177-f335-43ff-aa8d-0ddf05759fba/volumes" Nov 24 15:28:09 crc kubenswrapper[5039]: I1124 15:28:09.306443 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:28:09 crc kubenswrapper[5039]: E1124 15:28:09.307062 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:28:15 crc kubenswrapper[5039]: I1124 15:28:15.905357 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_3cef0b8e-d050-4055-a798-31b108727299/memcached/0.log" Nov 24 15:28:21 crc kubenswrapper[5039]: I1124 15:28:21.880471 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vfm8m"] Nov 24 15:28:21 crc kubenswrapper[5039]: E1124 15:28:21.881807 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c28f177-f335-43ff-aa8d-0ddf05759fba" containerName="extract-content" Nov 24 15:28:21 crc kubenswrapper[5039]: I1124 15:28:21.881839 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c28f177-f335-43ff-aa8d-0ddf05759fba" containerName="extract-content" Nov 24 15:28:21 crc kubenswrapper[5039]: E1124 15:28:21.881874 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c28f177-f335-43ff-aa8d-0ddf05759fba" containerName="registry-server" Nov 24 15:28:21 crc kubenswrapper[5039]: I1124 15:28:21.881886 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c28f177-f335-43ff-aa8d-0ddf05759fba" containerName="registry-server" Nov 24 15:28:21 crc kubenswrapper[5039]: E1124 15:28:21.881928 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c28f177-f335-43ff-aa8d-0ddf05759fba" containerName="extract-utilities" Nov 24 15:28:21 crc kubenswrapper[5039]: I1124 15:28:21.881939 5039 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="0c28f177-f335-43ff-aa8d-0ddf05759fba" containerName="extract-utilities" Nov 24 15:28:21 crc kubenswrapper[5039]: I1124 15:28:21.882278 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c28f177-f335-43ff-aa8d-0ddf05759fba" containerName="registry-server" Nov 24 15:28:21 crc kubenswrapper[5039]: I1124 15:28:21.884988 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vfm8m" Nov 24 15:28:21 crc kubenswrapper[5039]: I1124 15:28:21.900550 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vfm8m"] Nov 24 15:28:22 crc kubenswrapper[5039]: I1124 15:28:22.003087 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgppv\" (UniqueName: \"kubernetes.io/projected/47e48300-f92e-4f8d-9c86-cdb31e59016b-kube-api-access-vgppv\") pod \"redhat-marketplace-vfm8m\" (UID: \"47e48300-f92e-4f8d-9c86-cdb31e59016b\") " pod="openshift-marketplace/redhat-marketplace-vfm8m" Nov 24 15:28:22 crc kubenswrapper[5039]: I1124 15:28:22.003171 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47e48300-f92e-4f8d-9c86-cdb31e59016b-utilities\") pod \"redhat-marketplace-vfm8m\" (UID: \"47e48300-f92e-4f8d-9c86-cdb31e59016b\") " pod="openshift-marketplace/redhat-marketplace-vfm8m" Nov 24 15:28:22 crc kubenswrapper[5039]: I1124 15:28:22.003405 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47e48300-f92e-4f8d-9c86-cdb31e59016b-catalog-content\") pod \"redhat-marketplace-vfm8m\" (UID: \"47e48300-f92e-4f8d-9c86-cdb31e59016b\") " pod="openshift-marketplace/redhat-marketplace-vfm8m" Nov 24 15:28:22 crc kubenswrapper[5039]: I1124 15:28:22.105535 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgppv\" (UniqueName: \"kubernetes.io/projected/47e48300-f92e-4f8d-9c86-cdb31e59016b-kube-api-access-vgppv\") pod \"redhat-marketplace-vfm8m\" (UID: \"47e48300-f92e-4f8d-9c86-cdb31e59016b\") " pod="openshift-marketplace/redhat-marketplace-vfm8m" Nov 24 15:28:22 crc kubenswrapper[5039]: I1124 15:28:22.105604 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47e48300-f92e-4f8d-9c86-cdb31e59016b-utilities\") pod \"redhat-marketplace-vfm8m\" (UID: \"47e48300-f92e-4f8d-9c86-cdb31e59016b\") " pod="openshift-marketplace/redhat-marketplace-vfm8m" Nov 24 15:28:22 crc kubenswrapper[5039]: I1124 15:28:22.105638 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47e48300-f92e-4f8d-9c86-cdb31e59016b-catalog-content\") pod \"redhat-marketplace-vfm8m\" (UID: \"47e48300-f92e-4f8d-9c86-cdb31e59016b\") " pod="openshift-marketplace/redhat-marketplace-vfm8m" Nov 24 15:28:22 crc kubenswrapper[5039]: I1124 15:28:22.106084 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47e48300-f92e-4f8d-9c86-cdb31e59016b-utilities\") pod \"redhat-marketplace-vfm8m\" (UID: \"47e48300-f92e-4f8d-9c86-cdb31e59016b\") " pod="openshift-marketplace/redhat-marketplace-vfm8m" Nov 24 15:28:22 crc kubenswrapper[5039]: I1124 15:28:22.106136 5039 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47e48300-f92e-4f8d-9c86-cdb31e59016b-catalog-content\") pod \"redhat-marketplace-vfm8m\" (UID: \"47e48300-f92e-4f8d-9c86-cdb31e59016b\") " pod="openshift-marketplace/redhat-marketplace-vfm8m" Nov 24 15:28:22 crc kubenswrapper[5039]: I1124 15:28:22.125405 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgppv\" (UniqueName: \"kubernetes.io/projected/47e48300-f92e-4f8d-9c86-cdb31e59016b-kube-api-access-vgppv\") pod \"redhat-marketplace-vfm8m\" (UID: \"47e48300-f92e-4f8d-9c86-cdb31e59016b\") " pod="openshift-marketplace/redhat-marketplace-vfm8m" Nov 24 15:28:22 crc kubenswrapper[5039]: I1124 15:28:22.218814 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vfm8m" Nov 24 15:28:22 crc kubenswrapper[5039]: I1124 15:28:22.784164 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vfm8m"] Nov 24 15:28:23 crc kubenswrapper[5039]: I1124 15:28:23.130759 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vfm8m" event={"ID":"47e48300-f92e-4f8d-9c86-cdb31e59016b","Type":"ContainerStarted","Data":"3ac320a5a5ae7577eceb03645162da51a1ecde6fa2166a98fff4745a9ad12cb2"} Nov 24 15:28:23 crc kubenswrapper[5039]: I1124 15:28:23.131063 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vfm8m" event={"ID":"47e48300-f92e-4f8d-9c86-cdb31e59016b","Type":"ContainerStarted","Data":"48f6f27d8a87333691dfe2f5fcb024eddffd5634eb561954ddbbd2db5e1a3a6a"} Nov 24 15:28:24 crc kubenswrapper[5039]: I1124 15:28:24.141790 5039 generic.go:334] "Generic (PLEG): container finished" podID="47e48300-f92e-4f8d-9c86-cdb31e59016b" containerID="3ac320a5a5ae7577eceb03645162da51a1ecde6fa2166a98fff4745a9ad12cb2" exitCode=0 Nov 24 15:28:24 crc kubenswrapper[5039]: I1124 15:28:24.141839 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vfm8m" event={"ID":"47e48300-f92e-4f8d-9c86-cdb31e59016b","Type":"ContainerDied","Data":"3ac320a5a5ae7577eceb03645162da51a1ecde6fa2166a98fff4745a9ad12cb2"} Nov 24 15:28:24 crc kubenswrapper[5039]: I1124 15:28:24.144224 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 15:28:24 crc kubenswrapper[5039]: I1124 15:28:24.307448 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:28:24 crc kubenswrapper[5039]: E1124 15:28:24.307724 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:28:26 crc kubenswrapper[5039]: I1124 15:28:26.167388 5039 generic.go:334] "Generic (PLEG): container finished" podID="47e48300-f92e-4f8d-9c86-cdb31e59016b" containerID="c785c067f7b43e662f2ff90c4f1af64a9b82dfa8ff23e7a7db5947dd82c44264" exitCode=0 Nov 24 15:28:26 crc kubenswrapper[5039]: I1124 15:28:26.167561 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vfm8m" 
event={"ID":"47e48300-f92e-4f8d-9c86-cdb31e59016b","Type":"ContainerDied","Data":"c785c067f7b43e662f2ff90c4f1af64a9b82dfa8ff23e7a7db5947dd82c44264"} Nov 24 15:28:27 crc kubenswrapper[5039]: I1124 15:28:27.184157 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vfm8m" event={"ID":"47e48300-f92e-4f8d-9c86-cdb31e59016b","Type":"ContainerStarted","Data":"10cbcb0b051a1d7c568238df70c122b6f4dad4c19350faa8c799654e98c38e71"} Nov 24 15:28:27 crc kubenswrapper[5039]: I1124 15:28:27.210674 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vfm8m" podStartSLOduration=3.76124955 podStartE2EDuration="6.210657945s" podCreationTimestamp="2025-11-24 15:28:21 +0000 UTC" firstStartedPulling="2025-11-24 15:28:24.143940755 +0000 UTC m=+7816.583065275" lastFinishedPulling="2025-11-24 15:28:26.59334916 +0000 UTC m=+7819.032473670" observedRunningTime="2025-11-24 15:28:27.205954479 +0000 UTC m=+7819.645078979" watchObservedRunningTime="2025-11-24 15:28:27.210657945 +0000 UTC m=+7819.649782445" Nov 24 15:28:32 crc kubenswrapper[5039]: I1124 15:28:32.220066 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vfm8m" Nov 24 15:28:32 crc kubenswrapper[5039]: I1124 15:28:32.221523 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vfm8m" Nov 24 15:28:32 crc kubenswrapper[5039]: I1124 15:28:32.271208 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vfm8m" Nov 24 15:28:32 crc kubenswrapper[5039]: I1124 15:28:32.545080 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-sgqwz_b913096c-9ece-4755-9545-0116fbc53123/kube-rbac-proxy/0.log" Nov 24 15:28:32 crc kubenswrapper[5039]: I1124 15:28:32.587697 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-sgqwz_b913096c-9ece-4755-9545-0116fbc53123/manager/0.log" Nov 24 15:28:32 crc kubenswrapper[5039]: I1124 15:28:32.749872 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-79856dc55c-whklh_396e7965-a743-4028-989b-e3610abb5a3a/kube-rbac-proxy/0.log" Nov 24 15:28:32 crc kubenswrapper[5039]: I1124 15:28:32.859761 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-79856dc55c-whklh_396e7965-a743-4028-989b-e3610abb5a3a/manager/0.log" Nov 24 15:28:32 crc kubenswrapper[5039]: I1124 15:28:32.900358 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-6nwfx_99f1711f-1dd9-471d-9a2d-8c6e0a46fb0d/kube-rbac-proxy/0.log" Nov 24 15:28:32 crc kubenswrapper[5039]: I1124 15:28:32.974553 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-6nwfx_99f1711f-1dd9-471d-9a2d-8c6e0a46fb0d/manager/0.log" Nov 24 15:28:33 crc kubenswrapper[5039]: I1124 15:28:33.279321 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b_0dc29372-3c0b-496e-b027-e57abc3ca956/util/0.log" Nov 24 15:28:33 crc kubenswrapper[5039]: I1124 15:28:33.302442 5039 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vfm8m" Nov 24 15:28:33 crc kubenswrapper[5039]: I1124 15:28:33.379698 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vfm8m"] Nov 24 15:28:33 crc kubenswrapper[5039]: I1124 15:28:33.629665 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b_0dc29372-3c0b-496e-b027-e57abc3ca956/pull/0.log" Nov 24 15:28:33 crc kubenswrapper[5039]: I1124 15:28:33.662246 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b_0dc29372-3c0b-496e-b027-e57abc3ca956/util/0.log" Nov 24 15:28:33 crc kubenswrapper[5039]: I1124 15:28:33.717995 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b_0dc29372-3c0b-496e-b027-e57abc3ca956/pull/0.log" Nov 24 15:28:33 crc kubenswrapper[5039]: I1124 15:28:33.869598 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b_0dc29372-3c0b-496e-b027-e57abc3ca956/util/0.log" Nov 24 15:28:33 crc kubenswrapper[5039]: I1124 15:28:33.915459 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b_0dc29372-3c0b-496e-b027-e57abc3ca956/extract/0.log" Nov 24 15:28:33 crc kubenswrapper[5039]: I1124 15:28:33.937248 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e84265675a354f06eb89373e478e2910a0fa2be5c94d75f424506a3f17djc4b_0dc29372-3c0b-496e-b027-e57abc3ca956/pull/0.log" Nov 24 15:28:34 crc kubenswrapper[5039]: I1124 15:28:34.081474 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-l8cvk_0067c9ac-5dfc-4e0d-b316-161e02698ffd/kube-rbac-proxy/0.log" Nov 24 15:28:34 crc kubenswrapper[5039]: I1124 15:28:34.176724 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-bgn7g_bea08559-78a5-4287-85e8-a83768d94670/kube-rbac-proxy/0.log" Nov 24 15:28:34 crc kubenswrapper[5039]: I1124 15:28:34.208209 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-l8cvk_0067c9ac-5dfc-4e0d-b316-161e02698ffd/manager/0.log" Nov 24 15:28:34 crc kubenswrapper[5039]: I1124 15:28:34.374963 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-bgn7g_bea08559-78a5-4287-85e8-a83768d94670/manager/0.log" Nov 24 15:28:34 crc kubenswrapper[5039]: I1124 15:28:34.432045 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-69pxs_eee87172-9357-412c-8eb2-7df01649f1d0/manager/0.log" Nov 24 15:28:34 crc kubenswrapper[5039]: I1124 15:28:34.443490 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-69pxs_eee87172-9357-412c-8eb2-7df01649f1d0/kube-rbac-proxy/0.log" Nov 24 15:28:34 crc kubenswrapper[5039]: I1124 15:28:34.670013 5039 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-j7gmp_bd656299-f7da-4ca8-aee9-25c389243cc9/kube-rbac-proxy/0.log" Nov 24 15:28:34 crc kubenswrapper[5039]: I1124 15:28:34.786233 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-j7gmp_bd656299-f7da-4ca8-aee9-25c389243cc9/manager/0.log" Nov 24 15:28:34 crc kubenswrapper[5039]: I1124 15:28:34.847185 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-68gjk_35f14195-18aa-433d-8705-1aa24a8a1818/kube-rbac-proxy/0.log" Nov 24 15:28:34 crc kubenswrapper[5039]: I1124 15:28:34.916947 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-68gjk_35f14195-18aa-433d-8705-1aa24a8a1818/manager/0.log" Nov 24 15:28:35 crc kubenswrapper[5039]: I1124 15:28:35.017898 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-2lmlh_29991995-423a-42c0-ae52-2b3c160a3e0c/kube-rbac-proxy/0.log" Nov 24 15:28:35 crc kubenswrapper[5039]: I1124 15:28:35.131637 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-2lmlh_29991995-423a-42c0-ae52-2b3c160a3e0c/manager/0.log" Nov 24 15:28:35 crc kubenswrapper[5039]: I1124 15:28:35.231604 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-nng76_979a5bac-57c9-4d42-9af6-11228e980f7f/kube-rbac-proxy/0.log" Nov 24 15:28:35 crc kubenswrapper[5039]: I1124 15:28:35.259475 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vfm8m" podUID="47e48300-f92e-4f8d-9c86-cdb31e59016b" containerName="registry-server" containerID="cri-o://10cbcb0b051a1d7c568238df70c122b6f4dad4c19350faa8c799654e98c38e71" gracePeriod=2 Nov 24 15:28:35 crc kubenswrapper[5039]: I1124 15:28:35.285889 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-nng76_979a5bac-57c9-4d42-9af6-11228e980f7f/manager/0.log" Nov 24 15:28:35 crc kubenswrapper[5039]: I1124 15:28:35.385452 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-phdb6_3c29a4a4-1d0c-4a1f-a4b5-a67cb564707a/kube-rbac-proxy/0.log" Nov 24 15:28:35 crc kubenswrapper[5039]: I1124 15:28:35.493675 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-phdb6_3c29a4a4-1d0c-4a1f-a4b5-a67cb564707a/manager/0.log" Nov 24 15:28:35 crc kubenswrapper[5039]: I1124 15:28:35.570699 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-rvq7d_edf2350a-f77f-45ec-87c1-35f7b38ddcb3/kube-rbac-proxy/0.log" Nov 24 15:28:35 crc kubenswrapper[5039]: I1124 15:28:35.674437 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-rvq7d_edf2350a-f77f-45ec-87c1-35f7b38ddcb3/manager/0.log" Nov 24 15:28:35 crc kubenswrapper[5039]: I1124 15:28:35.809106 5039 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-nwsck_98b88919-04d2-4c01-b45a-dd72afbbe179/kube-rbac-proxy/0.log" Nov 24 15:28:35 crc kubenswrapper[5039]: I1124 15:28:35.810561 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-nwsck_98b88919-04d2-4c01-b45a-dd72afbbe179/manager/0.log" Nov 24 15:28:35 crc kubenswrapper[5039]: I1124 15:28:35.832878 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vfm8m" Nov 24 15:28:35 crc kubenswrapper[5039]: I1124 15:28:35.907485 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47e48300-f92e-4f8d-9c86-cdb31e59016b-utilities\") pod \"47e48300-f92e-4f8d-9c86-cdb31e59016b\" (UID: \"47e48300-f92e-4f8d-9c86-cdb31e59016b\") " Nov 24 15:28:35 crc kubenswrapper[5039]: I1124 15:28:35.908012 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vgppv\" (UniqueName: \"kubernetes.io/projected/47e48300-f92e-4f8d-9c86-cdb31e59016b-kube-api-access-vgppv\") pod \"47e48300-f92e-4f8d-9c86-cdb31e59016b\" (UID: \"47e48300-f92e-4f8d-9c86-cdb31e59016b\") " Nov 24 15:28:35 crc kubenswrapper[5039]: I1124 15:28:35.908242 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47e48300-f92e-4f8d-9c86-cdb31e59016b-catalog-content\") pod \"47e48300-f92e-4f8d-9c86-cdb31e59016b\" (UID: \"47e48300-f92e-4f8d-9c86-cdb31e59016b\") " Nov 24 15:28:35 crc kubenswrapper[5039]: I1124 15:28:35.908473 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47e48300-f92e-4f8d-9c86-cdb31e59016b-utilities" (OuterVolumeSpecName: "utilities") pod "47e48300-f92e-4f8d-9c86-cdb31e59016b" (UID: "47e48300-f92e-4f8d-9c86-cdb31e59016b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 15:28:35 crc kubenswrapper[5039]: I1124 15:28:35.908974 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47e48300-f92e-4f8d-9c86-cdb31e59016b-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 15:28:35 crc kubenswrapper[5039]: I1124 15:28:35.914921 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47e48300-f92e-4f8d-9c86-cdb31e59016b-kube-api-access-vgppv" (OuterVolumeSpecName: "kube-api-access-vgppv") pod "47e48300-f92e-4f8d-9c86-cdb31e59016b" (UID: "47e48300-f92e-4f8d-9c86-cdb31e59016b"). InnerVolumeSpecName "kube-api-access-vgppv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 15:28:35 crc kubenswrapper[5039]: I1124 15:28:35.925985 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47e48300-f92e-4f8d-9c86-cdb31e59016b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "47e48300-f92e-4f8d-9c86-cdb31e59016b" (UID: "47e48300-f92e-4f8d-9c86-cdb31e59016b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.010666 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47e48300-f92e-4f8d-9c86-cdb31e59016b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.010696 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vgppv\" (UniqueName: \"kubernetes.io/projected/47e48300-f92e-4f8d-9c86-cdb31e59016b-kube-api-access-vgppv\") on node \"crc\" DevicePath \"\"" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.030728 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-88sqn_e97ae0ee-d044-4b9d-a371-eec59a5ff932/manager/0.log" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.046132 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-88sqn_e97ae0ee-d044-4b9d-a371-eec59a5ff932/kube-rbac-proxy/0.log" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.167724 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb_3731fd87-4c6a-4fb0-a3d5-cf48e76a5448/kube-rbac-proxy/0.log" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.266579 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-544b9bb9-4vqcb_3731fd87-4c6a-4fb0-a3d5-cf48e76a5448/manager/0.log" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.272058 5039 generic.go:334] "Generic (PLEG): container finished" podID="47e48300-f92e-4f8d-9c86-cdb31e59016b" containerID="10cbcb0b051a1d7c568238df70c122b6f4dad4c19350faa8c799654e98c38e71" exitCode=0 Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.272091 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vfm8m" event={"ID":"47e48300-f92e-4f8d-9c86-cdb31e59016b","Type":"ContainerDied","Data":"10cbcb0b051a1d7c568238df70c122b6f4dad4c19350faa8c799654e98c38e71"} Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.272114 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vfm8m" event={"ID":"47e48300-f92e-4f8d-9c86-cdb31e59016b","Type":"ContainerDied","Data":"48f6f27d8a87333691dfe2f5fcb024eddffd5634eb561954ddbbd2db5e1a3a6a"} Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.272131 5039 scope.go:117] "RemoveContainer" containerID="10cbcb0b051a1d7c568238df70c122b6f4dad4c19350faa8c799654e98c38e71" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.272232 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vfm8m" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.295790 5039 scope.go:117] "RemoveContainer" containerID="c785c067f7b43e662f2ff90c4f1af64a9b82dfa8ff23e7a7db5947dd82c44264" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.304540 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vfm8m"] Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.322915 5039 scope.go:117] "RemoveContainer" containerID="3ac320a5a5ae7577eceb03645162da51a1ecde6fa2166a98fff4745a9ad12cb2" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.333952 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vfm8m"] Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.386409 5039 scope.go:117] "RemoveContainer" containerID="10cbcb0b051a1d7c568238df70c122b6f4dad4c19350faa8c799654e98c38e71" Nov 24 15:28:36 crc kubenswrapper[5039]: E1124 15:28:36.386807 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10cbcb0b051a1d7c568238df70c122b6f4dad4c19350faa8c799654e98c38e71\": container with ID starting with 10cbcb0b051a1d7c568238df70c122b6f4dad4c19350faa8c799654e98c38e71 not found: ID does not exist" containerID="10cbcb0b051a1d7c568238df70c122b6f4dad4c19350faa8c799654e98c38e71" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.386832 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10cbcb0b051a1d7c568238df70c122b6f4dad4c19350faa8c799654e98c38e71"} err="failed to get container status \"10cbcb0b051a1d7c568238df70c122b6f4dad4c19350faa8c799654e98c38e71\": rpc error: code = NotFound desc = could not find container \"10cbcb0b051a1d7c568238df70c122b6f4dad4c19350faa8c799654e98c38e71\": container with ID starting with 10cbcb0b051a1d7c568238df70c122b6f4dad4c19350faa8c799654e98c38e71 not found: ID does not exist" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.386852 5039 scope.go:117] "RemoveContainer" containerID="c785c067f7b43e662f2ff90c4f1af64a9b82dfa8ff23e7a7db5947dd82c44264" Nov 24 15:28:36 crc kubenswrapper[5039]: E1124 15:28:36.387071 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c785c067f7b43e662f2ff90c4f1af64a9b82dfa8ff23e7a7db5947dd82c44264\": container with ID starting with c785c067f7b43e662f2ff90c4f1af64a9b82dfa8ff23e7a7db5947dd82c44264 not found: ID does not exist" containerID="c785c067f7b43e662f2ff90c4f1af64a9b82dfa8ff23e7a7db5947dd82c44264" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.387104 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c785c067f7b43e662f2ff90c4f1af64a9b82dfa8ff23e7a7db5947dd82c44264"} err="failed to get container status \"c785c067f7b43e662f2ff90c4f1af64a9b82dfa8ff23e7a7db5947dd82c44264\": rpc error: code = NotFound desc = could not find container \"c785c067f7b43e662f2ff90c4f1af64a9b82dfa8ff23e7a7db5947dd82c44264\": container with ID starting with c785c067f7b43e662f2ff90c4f1af64a9b82dfa8ff23e7a7db5947dd82c44264 not found: ID does not exist" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.387127 5039 scope.go:117] "RemoveContainer" containerID="3ac320a5a5ae7577eceb03645162da51a1ecde6fa2166a98fff4745a9ad12cb2" Nov 24 15:28:36 crc kubenswrapper[5039]: E1124 15:28:36.387294 5039 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"3ac320a5a5ae7577eceb03645162da51a1ecde6fa2166a98fff4745a9ad12cb2\": container with ID starting with 3ac320a5a5ae7577eceb03645162da51a1ecde6fa2166a98fff4745a9ad12cb2 not found: ID does not exist" containerID="3ac320a5a5ae7577eceb03645162da51a1ecde6fa2166a98fff4745a9ad12cb2" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.387308 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ac320a5a5ae7577eceb03645162da51a1ecde6fa2166a98fff4745a9ad12cb2"} err="failed to get container status \"3ac320a5a5ae7577eceb03645162da51a1ecde6fa2166a98fff4745a9ad12cb2\": rpc error: code = NotFound desc = could not find container \"3ac320a5a5ae7577eceb03645162da51a1ecde6fa2166a98fff4745a9ad12cb2\": container with ID starting with 3ac320a5a5ae7577eceb03645162da51a1ecde6fa2166a98fff4745a9ad12cb2 not found: ID does not exist" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.611485 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-lg47r_30a0cc80-e6f2-48b0-9469-32d5d397c0aa/registry-server/0.log" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.671612 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-6ccdcd8b77-9k4cr_74fee1bc-d496-4e8d-9884-ce1b67a00e75/operator/0.log" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.905518 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-66cf5c67ff-ft57m_058dcaa2-f18f-4eff-bfd1-d290a8fd36a1/kube-rbac-proxy/0.log" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.930943 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-66cf5c67ff-ft57m_058dcaa2-f18f-4eff-bfd1-d290a8fd36a1/manager/0.log" Nov 24 15:28:36 crc kubenswrapper[5039]: I1124 15:28:36.968882 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-hwn8q_865f4099-70b9-45a1-9bcd-c92882c9aab1/kube-rbac-proxy/0.log" Nov 24 15:28:37 crc kubenswrapper[5039]: I1124 15:28:37.338009 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-hwn8q_865f4099-70b9-45a1-9bcd-c92882c9aab1/manager/0.log" Nov 24 15:28:37 crc kubenswrapper[5039]: I1124 15:28:37.559257 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-j9qqq_076a99d2-27b3-4d08-bdcc-876e1dec4f5f/operator/0.log" Nov 24 15:28:37 crc kubenswrapper[5039]: I1124 15:28:37.565402 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-qzwtb_90ff7526-7243-45b2-afaa-ee39dff42b46/kube-rbac-proxy/0.log" Nov 24 15:28:37 crc kubenswrapper[5039]: I1124 15:28:37.696038 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-qzwtb_90ff7526-7243-45b2-afaa-ee39dff42b46/manager/0.log" Nov 24 15:28:37 crc kubenswrapper[5039]: I1124 15:28:37.765330 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-bf6985ffc-g86nb_7f6bb6a1-8df6-4d15-8d27-a5bbc28b9b31/kube-rbac-proxy/0.log" Nov 24 15:28:37 crc kubenswrapper[5039]: I1124 15:28:37.772993 5039 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7b5fb95979-n45b6_0f3b98cc-e2e5-45f3-8fc2-94cc1a2ce58a/manager/0.log" Nov 24 15:28:37 crc kubenswrapper[5039]: I1124 15:28:37.889692 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cb74df96-qk6bz_6cbb9e3e-f545-4d83-aee4-8e122c54437c/kube-rbac-proxy/0.log" Nov 24 15:28:37 crc kubenswrapper[5039]: I1124 15:28:37.987554 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cb74df96-qk6bz_6cbb9e3e-f545-4d83-aee4-8e122c54437c/manager/0.log" Nov 24 15:28:38 crc kubenswrapper[5039]: I1124 15:28:38.119441 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-bf6985ffc-g86nb_7f6bb6a1-8df6-4d15-8d27-a5bbc28b9b31/manager/0.log" Nov 24 15:28:38 crc kubenswrapper[5039]: I1124 15:28:38.157635 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-kg5jg_ab34fb1d-70af-4438-86c7-3856f1733097/kube-rbac-proxy/0.log" Nov 24 15:28:38 crc kubenswrapper[5039]: I1124 15:28:38.160002 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-kg5jg_ab34fb1d-70af-4438-86c7-3856f1733097/manager/0.log" Nov 24 15:28:38 crc kubenswrapper[5039]: I1124 15:28:38.331347 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:28:38 crc kubenswrapper[5039]: I1124 15:28:38.331529 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47e48300-f92e-4f8d-9c86-cdb31e59016b" path="/var/lib/kubelet/pods/47e48300-f92e-4f8d-9c86-cdb31e59016b/volumes" Nov 24 15:28:38 crc kubenswrapper[5039]: E1124 15:28:38.331646 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:28:46 crc kubenswrapper[5039]: I1124 15:28:46.054910 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4kjnn"] Nov 24 15:28:46 crc kubenswrapper[5039]: E1124 15:28:46.055861 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47e48300-f92e-4f8d-9c86-cdb31e59016b" containerName="extract-content" Nov 24 15:28:46 crc kubenswrapper[5039]: I1124 15:28:46.055876 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="47e48300-f92e-4f8d-9c86-cdb31e59016b" containerName="extract-content" Nov 24 15:28:46 crc kubenswrapper[5039]: E1124 15:28:46.055906 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47e48300-f92e-4f8d-9c86-cdb31e59016b" containerName="extract-utilities" Nov 24 15:28:46 crc kubenswrapper[5039]: I1124 15:28:46.055914 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="47e48300-f92e-4f8d-9c86-cdb31e59016b" containerName="extract-utilities" Nov 24 15:28:46 crc kubenswrapper[5039]: E1124 15:28:46.055947 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47e48300-f92e-4f8d-9c86-cdb31e59016b" containerName="registry-server" Nov 24 15:28:46 
crc kubenswrapper[5039]: I1124 15:28:46.055956 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="47e48300-f92e-4f8d-9c86-cdb31e59016b" containerName="registry-server" Nov 24 15:28:46 crc kubenswrapper[5039]: I1124 15:28:46.056277 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="47e48300-f92e-4f8d-9c86-cdb31e59016b" containerName="registry-server" Nov 24 15:28:46 crc kubenswrapper[5039]: I1124 15:28:46.061747 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4kjnn" Nov 24 15:28:46 crc kubenswrapper[5039]: I1124 15:28:46.066108 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4kjnn"] Nov 24 15:28:46 crc kubenswrapper[5039]: I1124 15:28:46.132519 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2afaaa34-5c21-4785-a786-98eaa5fa2e83-catalog-content\") pod \"certified-operators-4kjnn\" (UID: \"2afaaa34-5c21-4785-a786-98eaa5fa2e83\") " pod="openshift-marketplace/certified-operators-4kjnn" Nov 24 15:28:46 crc kubenswrapper[5039]: I1124 15:28:46.132607 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5v57w\" (UniqueName: \"kubernetes.io/projected/2afaaa34-5c21-4785-a786-98eaa5fa2e83-kube-api-access-5v57w\") pod \"certified-operators-4kjnn\" (UID: \"2afaaa34-5c21-4785-a786-98eaa5fa2e83\") " pod="openshift-marketplace/certified-operators-4kjnn" Nov 24 15:28:46 crc kubenswrapper[5039]: I1124 15:28:46.132656 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2afaaa34-5c21-4785-a786-98eaa5fa2e83-utilities\") pod \"certified-operators-4kjnn\" (UID: \"2afaaa34-5c21-4785-a786-98eaa5fa2e83\") " pod="openshift-marketplace/certified-operators-4kjnn" Nov 24 15:28:46 crc kubenswrapper[5039]: I1124 15:28:46.234524 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5v57w\" (UniqueName: \"kubernetes.io/projected/2afaaa34-5c21-4785-a786-98eaa5fa2e83-kube-api-access-5v57w\") pod \"certified-operators-4kjnn\" (UID: \"2afaaa34-5c21-4785-a786-98eaa5fa2e83\") " pod="openshift-marketplace/certified-operators-4kjnn" Nov 24 15:28:46 crc kubenswrapper[5039]: I1124 15:28:46.234592 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2afaaa34-5c21-4785-a786-98eaa5fa2e83-utilities\") pod \"certified-operators-4kjnn\" (UID: \"2afaaa34-5c21-4785-a786-98eaa5fa2e83\") " pod="openshift-marketplace/certified-operators-4kjnn" Nov 24 15:28:46 crc kubenswrapper[5039]: I1124 15:28:46.234755 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2afaaa34-5c21-4785-a786-98eaa5fa2e83-catalog-content\") pod \"certified-operators-4kjnn\" (UID: \"2afaaa34-5c21-4785-a786-98eaa5fa2e83\") " pod="openshift-marketplace/certified-operators-4kjnn" Nov 24 15:28:46 crc kubenswrapper[5039]: I1124 15:28:46.235226 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2afaaa34-5c21-4785-a786-98eaa5fa2e83-catalog-content\") pod \"certified-operators-4kjnn\" (UID: \"2afaaa34-5c21-4785-a786-98eaa5fa2e83\") " 
pod="openshift-marketplace/certified-operators-4kjnn" Nov 24 15:28:46 crc kubenswrapper[5039]: I1124 15:28:46.235708 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2afaaa34-5c21-4785-a786-98eaa5fa2e83-utilities\") pod \"certified-operators-4kjnn\" (UID: \"2afaaa34-5c21-4785-a786-98eaa5fa2e83\") " pod="openshift-marketplace/certified-operators-4kjnn" Nov 24 15:28:46 crc kubenswrapper[5039]: I1124 15:28:46.269472 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5v57w\" (UniqueName: \"kubernetes.io/projected/2afaaa34-5c21-4785-a786-98eaa5fa2e83-kube-api-access-5v57w\") pod \"certified-operators-4kjnn\" (UID: \"2afaaa34-5c21-4785-a786-98eaa5fa2e83\") " pod="openshift-marketplace/certified-operators-4kjnn" Nov 24 15:28:46 crc kubenswrapper[5039]: I1124 15:28:46.385610 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4kjnn" Nov 24 15:28:46 crc kubenswrapper[5039]: I1124 15:28:46.951371 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4kjnn"] Nov 24 15:28:47 crc kubenswrapper[5039]: I1124 15:28:47.391103 5039 generic.go:334] "Generic (PLEG): container finished" podID="2afaaa34-5c21-4785-a786-98eaa5fa2e83" containerID="408f6475936cdf298bd2fad3c8e1a6f710c00d07a5c7ad0a67e5d20b5d4c5fee" exitCode=0 Nov 24 15:28:47 crc kubenswrapper[5039]: I1124 15:28:47.391184 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4kjnn" event={"ID":"2afaaa34-5c21-4785-a786-98eaa5fa2e83","Type":"ContainerDied","Data":"408f6475936cdf298bd2fad3c8e1a6f710c00d07a5c7ad0a67e5d20b5d4c5fee"} Nov 24 15:28:47 crc kubenswrapper[5039]: I1124 15:28:47.391412 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4kjnn" event={"ID":"2afaaa34-5c21-4785-a786-98eaa5fa2e83","Type":"ContainerStarted","Data":"adfc165c2106695dbd34d7ebecbd9b36c2a5a5b9e9ae77d366e128b017e3469f"} Nov 24 15:28:48 crc kubenswrapper[5039]: I1124 15:28:48.405700 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4kjnn" event={"ID":"2afaaa34-5c21-4785-a786-98eaa5fa2e83","Type":"ContainerStarted","Data":"1da29050a878ad5d680a39cbbd8b7131e904c0ef74c3f9e825da770a150f65aa"} Nov 24 15:28:50 crc kubenswrapper[5039]: I1124 15:28:50.426211 5039 generic.go:334] "Generic (PLEG): container finished" podID="2afaaa34-5c21-4785-a786-98eaa5fa2e83" containerID="1da29050a878ad5d680a39cbbd8b7131e904c0ef74c3f9e825da770a150f65aa" exitCode=0 Nov 24 15:28:50 crc kubenswrapper[5039]: I1124 15:28:50.426305 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4kjnn" event={"ID":"2afaaa34-5c21-4785-a786-98eaa5fa2e83","Type":"ContainerDied","Data":"1da29050a878ad5d680a39cbbd8b7131e904c0ef74c3f9e825da770a150f65aa"} Nov 24 15:28:51 crc kubenswrapper[5039]: I1124 15:28:51.440983 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4kjnn" event={"ID":"2afaaa34-5c21-4785-a786-98eaa5fa2e83","Type":"ContainerStarted","Data":"c452e08b293c0c14bb5f4b898c9961afa4171d57a4e5cb05acf9111ed543f60e"} Nov 24 15:28:51 crc kubenswrapper[5039]: I1124 15:28:51.475513 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4kjnn" podStartSLOduration=2.051402038 
podStartE2EDuration="5.475476099s" podCreationTimestamp="2025-11-24 15:28:46 +0000 UTC" firstStartedPulling="2025-11-24 15:28:47.393633244 +0000 UTC m=+7839.832757734" lastFinishedPulling="2025-11-24 15:28:50.817707285 +0000 UTC m=+7843.256831795" observedRunningTime="2025-11-24 15:28:51.459857638 +0000 UTC m=+7843.898982148" watchObservedRunningTime="2025-11-24 15:28:51.475476099 +0000 UTC m=+7843.914600609" Nov 24 15:28:53 crc kubenswrapper[5039]: I1124 15:28:53.307076 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:28:53 crc kubenswrapper[5039]: E1124 15:28:53.308597 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:28:56 crc kubenswrapper[5039]: I1124 15:28:56.385826 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4kjnn" Nov 24 15:28:56 crc kubenswrapper[5039]: I1124 15:28:56.386462 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4kjnn" Nov 24 15:28:57 crc kubenswrapper[5039]: I1124 15:28:57.442760 5039 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-4kjnn" podUID="2afaaa34-5c21-4785-a786-98eaa5fa2e83" containerName="registry-server" probeResult="failure" output=< Nov 24 15:28:57 crc kubenswrapper[5039]: timeout: failed to connect service ":50051" within 1s Nov 24 15:28:57 crc kubenswrapper[5039]: > Nov 24 15:28:57 crc kubenswrapper[5039]: I1124 15:28:57.580859 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-4qp6m_1988c73c-a04d-4b50-af92-54dfc2a4a262/control-plane-machine-set-operator/0.log" Nov 24 15:28:57 crc kubenswrapper[5039]: I1124 15:28:57.703642 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-n2dwh_ae5ca663-7edb-49dd-a7a7-668eeace13f7/kube-rbac-proxy/0.log" Nov 24 15:28:57 crc kubenswrapper[5039]: I1124 15:28:57.795953 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-n2dwh_ae5ca663-7edb-49dd-a7a7-668eeace13f7/machine-api-operator/0.log" Nov 24 15:29:06 crc kubenswrapper[5039]: I1124 15:29:06.441894 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4kjnn" Nov 24 15:29:06 crc kubenswrapper[5039]: I1124 15:29:06.507401 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4kjnn" Nov 24 15:29:06 crc kubenswrapper[5039]: I1124 15:29:06.685037 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4kjnn"] Nov 24 15:29:07 crc kubenswrapper[5039]: I1124 15:29:07.590281 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4kjnn" podUID="2afaaa34-5c21-4785-a786-98eaa5fa2e83" containerName="registry-server" 
containerID="cri-o://c452e08b293c0c14bb5f4b898c9961afa4171d57a4e5cb05acf9111ed543f60e" gracePeriod=2 Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.100066 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4kjnn" Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.237634 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5v57w\" (UniqueName: \"kubernetes.io/projected/2afaaa34-5c21-4785-a786-98eaa5fa2e83-kube-api-access-5v57w\") pod \"2afaaa34-5c21-4785-a786-98eaa5fa2e83\" (UID: \"2afaaa34-5c21-4785-a786-98eaa5fa2e83\") " Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.237749 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2afaaa34-5c21-4785-a786-98eaa5fa2e83-utilities\") pod \"2afaaa34-5c21-4785-a786-98eaa5fa2e83\" (UID: \"2afaaa34-5c21-4785-a786-98eaa5fa2e83\") " Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.237814 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2afaaa34-5c21-4785-a786-98eaa5fa2e83-catalog-content\") pod \"2afaaa34-5c21-4785-a786-98eaa5fa2e83\" (UID: \"2afaaa34-5c21-4785-a786-98eaa5fa2e83\") " Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.238777 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2afaaa34-5c21-4785-a786-98eaa5fa2e83-utilities" (OuterVolumeSpecName: "utilities") pod "2afaaa34-5c21-4785-a786-98eaa5fa2e83" (UID: "2afaaa34-5c21-4785-a786-98eaa5fa2e83"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.244321 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2afaaa34-5c21-4785-a786-98eaa5fa2e83-kube-api-access-5v57w" (OuterVolumeSpecName: "kube-api-access-5v57w") pod "2afaaa34-5c21-4785-a786-98eaa5fa2e83" (UID: "2afaaa34-5c21-4785-a786-98eaa5fa2e83"). InnerVolumeSpecName "kube-api-access-5v57w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.282854 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2afaaa34-5c21-4785-a786-98eaa5fa2e83-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2afaaa34-5c21-4785-a786-98eaa5fa2e83" (UID: "2afaaa34-5c21-4785-a786-98eaa5fa2e83"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.316651 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:29:08 crc kubenswrapper[5039]: E1124 15:29:08.316963 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.340594 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2afaaa34-5c21-4785-a786-98eaa5fa2e83-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.340628 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5v57w\" (UniqueName: \"kubernetes.io/projected/2afaaa34-5c21-4785-a786-98eaa5fa2e83-kube-api-access-5v57w\") on node \"crc\" DevicePath \"\"" Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.340645 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2afaaa34-5c21-4785-a786-98eaa5fa2e83-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.609623 5039 generic.go:334] "Generic (PLEG): container finished" podID="2afaaa34-5c21-4785-a786-98eaa5fa2e83" containerID="c452e08b293c0c14bb5f4b898c9961afa4171d57a4e5cb05acf9111ed543f60e" exitCode=0 Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.609645 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4kjnn" Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.609679 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4kjnn" event={"ID":"2afaaa34-5c21-4785-a786-98eaa5fa2e83","Type":"ContainerDied","Data":"c452e08b293c0c14bb5f4b898c9961afa4171d57a4e5cb05acf9111ed543f60e"} Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.609765 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4kjnn" event={"ID":"2afaaa34-5c21-4785-a786-98eaa5fa2e83","Type":"ContainerDied","Data":"adfc165c2106695dbd34d7ebecbd9b36c2a5a5b9e9ae77d366e128b017e3469f"} Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.609793 5039 scope.go:117] "RemoveContainer" containerID="c452e08b293c0c14bb5f4b898c9961afa4171d57a4e5cb05acf9111ed543f60e" Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.639512 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4kjnn"] Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.644068 5039 scope.go:117] "RemoveContainer" containerID="1da29050a878ad5d680a39cbbd8b7131e904c0ef74c3f9e825da770a150f65aa" Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.648878 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4kjnn"] Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.662697 5039 scope.go:117] "RemoveContainer" containerID="408f6475936cdf298bd2fad3c8e1a6f710c00d07a5c7ad0a67e5d20b5d4c5fee" Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.718554 5039 scope.go:117] "RemoveContainer" containerID="c452e08b293c0c14bb5f4b898c9961afa4171d57a4e5cb05acf9111ed543f60e" Nov 24 15:29:08 crc kubenswrapper[5039]: E1124 15:29:08.719541 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c452e08b293c0c14bb5f4b898c9961afa4171d57a4e5cb05acf9111ed543f60e\": container with ID starting with c452e08b293c0c14bb5f4b898c9961afa4171d57a4e5cb05acf9111ed543f60e not found: ID does not exist" containerID="c452e08b293c0c14bb5f4b898c9961afa4171d57a4e5cb05acf9111ed543f60e" Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.719596 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c452e08b293c0c14bb5f4b898c9961afa4171d57a4e5cb05acf9111ed543f60e"} err="failed to get container status \"c452e08b293c0c14bb5f4b898c9961afa4171d57a4e5cb05acf9111ed543f60e\": rpc error: code = NotFound desc = could not find container \"c452e08b293c0c14bb5f4b898c9961afa4171d57a4e5cb05acf9111ed543f60e\": container with ID starting with c452e08b293c0c14bb5f4b898c9961afa4171d57a4e5cb05acf9111ed543f60e not found: ID does not exist" Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.719630 5039 scope.go:117] "RemoveContainer" containerID="1da29050a878ad5d680a39cbbd8b7131e904c0ef74c3f9e825da770a150f65aa" Nov 24 15:29:08 crc kubenswrapper[5039]: E1124 15:29:08.720027 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1da29050a878ad5d680a39cbbd8b7131e904c0ef74c3f9e825da770a150f65aa\": container with ID starting with 1da29050a878ad5d680a39cbbd8b7131e904c0ef74c3f9e825da770a150f65aa not found: ID does not exist" containerID="1da29050a878ad5d680a39cbbd8b7131e904c0ef74c3f9e825da770a150f65aa" Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.720119 5039 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1da29050a878ad5d680a39cbbd8b7131e904c0ef74c3f9e825da770a150f65aa"} err="failed to get container status \"1da29050a878ad5d680a39cbbd8b7131e904c0ef74c3f9e825da770a150f65aa\": rpc error: code = NotFound desc = could not find container \"1da29050a878ad5d680a39cbbd8b7131e904c0ef74c3f9e825da770a150f65aa\": container with ID starting with 1da29050a878ad5d680a39cbbd8b7131e904c0ef74c3f9e825da770a150f65aa not found: ID does not exist" Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.720192 5039 scope.go:117] "RemoveContainer" containerID="408f6475936cdf298bd2fad3c8e1a6f710c00d07a5c7ad0a67e5d20b5d4c5fee" Nov 24 15:29:08 crc kubenswrapper[5039]: E1124 15:29:08.720672 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"408f6475936cdf298bd2fad3c8e1a6f710c00d07a5c7ad0a67e5d20b5d4c5fee\": container with ID starting with 408f6475936cdf298bd2fad3c8e1a6f710c00d07a5c7ad0a67e5d20b5d4c5fee not found: ID does not exist" containerID="408f6475936cdf298bd2fad3c8e1a6f710c00d07a5c7ad0a67e5d20b5d4c5fee" Nov 24 15:29:08 crc kubenswrapper[5039]: I1124 15:29:08.720717 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"408f6475936cdf298bd2fad3c8e1a6f710c00d07a5c7ad0a67e5d20b5d4c5fee"} err="failed to get container status \"408f6475936cdf298bd2fad3c8e1a6f710c00d07a5c7ad0a67e5d20b5d4c5fee\": rpc error: code = NotFound desc = could not find container \"408f6475936cdf298bd2fad3c8e1a6f710c00d07a5c7ad0a67e5d20b5d4c5fee\": container with ID starting with 408f6475936cdf298bd2fad3c8e1a6f710c00d07a5c7ad0a67e5d20b5d4c5fee not found: ID does not exist" Nov 24 15:29:10 crc kubenswrapper[5039]: I1124 15:29:10.319938 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2afaaa34-5c21-4785-a786-98eaa5fa2e83" path="/var/lib/kubelet/pods/2afaaa34-5c21-4785-a786-98eaa5fa2e83/volumes" Nov 24 15:29:11 crc kubenswrapper[5039]: I1124 15:29:11.078740 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-8kpc5_ab33654d-a27e-4922-87c3-37d387a8dfa6/cert-manager-controller/0.log" Nov 24 15:29:11 crc kubenswrapper[5039]: I1124 15:29:11.249895 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-dwbfg_e8bfdf0d-df1c-4dda-8c3d-8113eee0ad4a/cert-manager-cainjector/0.log" Nov 24 15:29:11 crc kubenswrapper[5039]: I1124 15:29:11.293418 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-9rmz6_2526d128-0579-4f6f-9327-12ac7fe30e96/cert-manager-webhook/0.log" Nov 24 15:29:20 crc kubenswrapper[5039]: I1124 15:29:20.307561 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:29:20 crc kubenswrapper[5039]: E1124 15:29:20.308654 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:29:24 crc kubenswrapper[5039]: I1124 15:29:24.456198 5039 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-9dqz9_1eee0206-49ea-45f5-8c34-547075ba3c65/nmstate-console-plugin/0.log" Nov 24 15:29:24 crc kubenswrapper[5039]: I1124 15:29:24.608706 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-ptxfm_e0e36cbb-009a-4784-a04d-95badbce22d0/nmstate-handler/0.log" Nov 24 15:29:24 crc kubenswrapper[5039]: I1124 15:29:24.658566 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-fnjvc_f006222b-71be-4a99-9b20-e048040bd042/kube-rbac-proxy/0.log" Nov 24 15:29:24 crc kubenswrapper[5039]: I1124 15:29:24.690790 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-fnjvc_f006222b-71be-4a99-9b20-e048040bd042/nmstate-metrics/0.log" Nov 24 15:29:24 crc kubenswrapper[5039]: I1124 15:29:24.874178 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-55t2b_a78868c4-aedd-4fe3-a055-5460cac9f6c4/nmstate-operator/0.log" Nov 24 15:29:24 crc kubenswrapper[5039]: I1124 15:29:24.896912 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-j8g7c_667e819e-83fc-453d-90d3-7b89b63e15a4/nmstate-webhook/0.log" Nov 24 15:29:34 crc kubenswrapper[5039]: I1124 15:29:34.308086 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:29:34 crc kubenswrapper[5039]: E1124 15:29:34.309144 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:29:38 crc kubenswrapper[5039]: I1124 15:29:38.295053 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-dfbf69d45-vngzb_e97f0fac-4f42-4ea9-b853-33c7aedeba68/kube-rbac-proxy/0.log" Nov 24 15:29:38 crc kubenswrapper[5039]: I1124 15:29:38.317099 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-dfbf69d45-vngzb_e97f0fac-4f42-4ea9-b853-33c7aedeba68/manager/0.log" Nov 24 15:29:49 crc kubenswrapper[5039]: I1124 15:29:49.306992 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:29:49 crc kubenswrapper[5039]: E1124 15:29:49.308099 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:29:53 crc kubenswrapper[5039]: I1124 15:29:53.166551 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_cluster-logging-operator-ff9846bd-8d4cv_ecb03566-7ffa-42ab-aa02-22bad9858b86/cluster-logging-operator/0.log" Nov 24 15:29:53 crc kubenswrapper[5039]: I1124 15:29:53.345687 5039 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-logging_collector-cqptg_a477b3d9-ef5d-4254-bc37-62f62a3ac851/collector/0.log" Nov 24 15:29:53 crc kubenswrapper[5039]: I1124 15:29:53.435565 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-compactor-0_a1e6b0b7-32a0-465a-a329-d060dbf0b8f9/loki-compactor/0.log" Nov 24 15:29:53 crc kubenswrapper[5039]: I1124 15:29:53.539255 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-distributor-76cc67bf56-xwwg7_37a01398-aa18-423a-8fa0-b3d1f5fe0cfd/loki-distributor/0.log" Nov 24 15:29:53 crc kubenswrapper[5039]: I1124 15:29:53.633591 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-797bc7dfc5-fjsvj_f025c7ee-097c-4915-9946-41b57f995f0d/gateway/0.log" Nov 24 15:29:53 crc kubenswrapper[5039]: I1124 15:29:53.648702 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-797bc7dfc5-fjsvj_f025c7ee-097c-4915-9946-41b57f995f0d/opa/0.log" Nov 24 15:29:53 crc kubenswrapper[5039]: I1124 15:29:53.819076 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-797bc7dfc5-zd8bg_c24389d6-c229-4c2b-9933-61cd5f9b81d3/gateway/0.log" Nov 24 15:29:53 crc kubenswrapper[5039]: I1124 15:29:53.835186 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-797bc7dfc5-zd8bg_c24389d6-c229-4c2b-9933-61cd5f9b81d3/opa/0.log" Nov 24 15:29:53 crc kubenswrapper[5039]: I1124 15:29:53.995106 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-index-gateway-0_771e7704-d64a-4536-adfb-2ca0e6356956/loki-index-gateway/0.log" Nov 24 15:29:54 crc kubenswrapper[5039]: I1124 15:29:54.103896 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-ingester-0_ffdab614-73c1-4ac9-adba-d2ec7ce81550/loki-ingester/0.log" Nov 24 15:29:54 crc kubenswrapper[5039]: I1124 15:29:54.220109 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-querier-5895d59bb8-4xrbf_f3453a5d-07da-4391-a2d5-df5154962b61/loki-querier/0.log" Nov 24 15:29:54 crc kubenswrapper[5039]: I1124 15:29:54.305060 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-query-frontend-84558f7c9f-qvz9s_0c93d652-05e2-4359-b5f6-6951e26ba7d2/loki-query-frontend/0.log" Nov 24 15:30:00 crc kubenswrapper[5039]: I1124 15:30:00.163305 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h"] Nov 24 15:30:00 crc kubenswrapper[5039]: E1124 15:30:00.164268 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2afaaa34-5c21-4785-a786-98eaa5fa2e83" containerName="extract-utilities" Nov 24 15:30:00 crc kubenswrapper[5039]: I1124 15:30:00.164282 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="2afaaa34-5c21-4785-a786-98eaa5fa2e83" containerName="extract-utilities" Nov 24 15:30:00 crc kubenswrapper[5039]: E1124 15:30:00.164294 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2afaaa34-5c21-4785-a786-98eaa5fa2e83" containerName="registry-server" Nov 24 15:30:00 crc kubenswrapper[5039]: I1124 15:30:00.164300 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="2afaaa34-5c21-4785-a786-98eaa5fa2e83" containerName="registry-server" Nov 24 15:30:00 crc kubenswrapper[5039]: E1124 15:30:00.164319 5039 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="2afaaa34-5c21-4785-a786-98eaa5fa2e83" containerName="extract-content" Nov 24 15:30:00 crc kubenswrapper[5039]: I1124 15:30:00.164325 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="2afaaa34-5c21-4785-a786-98eaa5fa2e83" containerName="extract-content" Nov 24 15:30:00 crc kubenswrapper[5039]: I1124 15:30:00.164582 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="2afaaa34-5c21-4785-a786-98eaa5fa2e83" containerName="registry-server" Nov 24 15:30:00 crc kubenswrapper[5039]: I1124 15:30:00.165305 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h" Nov 24 15:30:00 crc kubenswrapper[5039]: I1124 15:30:00.168973 5039 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 24 15:30:00 crc kubenswrapper[5039]: I1124 15:30:00.170064 5039 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 24 15:30:00 crc kubenswrapper[5039]: I1124 15:30:00.187991 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h"] Nov 24 15:30:00 crc kubenswrapper[5039]: I1124 15:30:00.226865 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e471579c-a9d3-458c-9149-99a9f77c416e-config-volume\") pod \"collect-profiles-29399970-rwl8h\" (UID: \"e471579c-a9d3-458c-9149-99a9f77c416e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h" Nov 24 15:30:00 crc kubenswrapper[5039]: I1124 15:30:00.227030 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vff97\" (UniqueName: \"kubernetes.io/projected/e471579c-a9d3-458c-9149-99a9f77c416e-kube-api-access-vff97\") pod \"collect-profiles-29399970-rwl8h\" (UID: \"e471579c-a9d3-458c-9149-99a9f77c416e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h" Nov 24 15:30:00 crc kubenswrapper[5039]: I1124 15:30:00.227409 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e471579c-a9d3-458c-9149-99a9f77c416e-secret-volume\") pod \"collect-profiles-29399970-rwl8h\" (UID: \"e471579c-a9d3-458c-9149-99a9f77c416e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h" Nov 24 15:30:00 crc kubenswrapper[5039]: I1124 15:30:00.329559 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e471579c-a9d3-458c-9149-99a9f77c416e-secret-volume\") pod \"collect-profiles-29399970-rwl8h\" (UID: \"e471579c-a9d3-458c-9149-99a9f77c416e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h" Nov 24 15:30:00 crc kubenswrapper[5039]: I1124 15:30:00.329622 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e471579c-a9d3-458c-9149-99a9f77c416e-config-volume\") pod \"collect-profiles-29399970-rwl8h\" (UID: \"e471579c-a9d3-458c-9149-99a9f77c416e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h" Nov 24 15:30:00 crc kubenswrapper[5039]: I1124 15:30:00.329715 
5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vff97\" (UniqueName: \"kubernetes.io/projected/e471579c-a9d3-458c-9149-99a9f77c416e-kube-api-access-vff97\") pod \"collect-profiles-29399970-rwl8h\" (UID: \"e471579c-a9d3-458c-9149-99a9f77c416e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h" Nov 24 15:30:00 crc kubenswrapper[5039]: I1124 15:30:00.330728 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e471579c-a9d3-458c-9149-99a9f77c416e-config-volume\") pod \"collect-profiles-29399970-rwl8h\" (UID: \"e471579c-a9d3-458c-9149-99a9f77c416e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h" Nov 24 15:30:00 crc kubenswrapper[5039]: I1124 15:30:00.334998 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e471579c-a9d3-458c-9149-99a9f77c416e-secret-volume\") pod \"collect-profiles-29399970-rwl8h\" (UID: \"e471579c-a9d3-458c-9149-99a9f77c416e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h" Nov 24 15:30:00 crc kubenswrapper[5039]: I1124 15:30:00.346719 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vff97\" (UniqueName: \"kubernetes.io/projected/e471579c-a9d3-458c-9149-99a9f77c416e-kube-api-access-vff97\") pod \"collect-profiles-29399970-rwl8h\" (UID: \"e471579c-a9d3-458c-9149-99a9f77c416e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h" Nov 24 15:30:00 crc kubenswrapper[5039]: I1124 15:30:00.486768 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h" Nov 24 15:30:00 crc kubenswrapper[5039]: I1124 15:30:00.979135 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h"] Nov 24 15:30:01 crc kubenswrapper[5039]: I1124 15:30:01.234554 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h" event={"ID":"e471579c-a9d3-458c-9149-99a9f77c416e","Type":"ContainerStarted","Data":"7fc1dfa098251ce0d4f47b3e3a81865ae684d08b86f662c032cd4b9e38594dbf"} Nov 24 15:30:01 crc kubenswrapper[5039]: I1124 15:30:01.235419 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h" event={"ID":"e471579c-a9d3-458c-9149-99a9f77c416e","Type":"ContainerStarted","Data":"1367f4e6f3acf6096d22843db543b728d164861e87e5474a27a98533908276ee"} Nov 24 15:30:01 crc kubenswrapper[5039]: I1124 15:30:01.256490 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h" podStartSLOduration=1.256471135 podStartE2EDuration="1.256471135s" podCreationTimestamp="2025-11-24 15:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 15:30:01.249205366 +0000 UTC m=+7913.688329886" watchObservedRunningTime="2025-11-24 15:30:01.256471135 +0000 UTC m=+7913.695595635" Nov 24 15:30:01 crc kubenswrapper[5039]: I1124 15:30:01.876931 5039 scope.go:117] "RemoveContainer" containerID="3bdde357ab9cd3b8409e91a4f3ee02f91bc49098ed21b0f945c9408feb095aab" Nov 24 15:30:01 crc kubenswrapper[5039]: I1124 
15:30:01.899349 5039 scope.go:117] "RemoveContainer" containerID="66d6602fd80785d21232041a1e3ce588e0a878d9ed984b4531e1d260c81cb1d1" Nov 24 15:30:01 crc kubenswrapper[5039]: I1124 15:30:01.932759 5039 scope.go:117] "RemoveContainer" containerID="7a7ed4ec97fa3fe38858c6fd033607fcf3eb28e0b05b8ed51f25609c20eee193" Nov 24 15:30:02 crc kubenswrapper[5039]: I1124 15:30:02.250938 5039 generic.go:334] "Generic (PLEG): container finished" podID="e471579c-a9d3-458c-9149-99a9f77c416e" containerID="7fc1dfa098251ce0d4f47b3e3a81865ae684d08b86f662c032cd4b9e38594dbf" exitCode=0 Nov 24 15:30:02 crc kubenswrapper[5039]: I1124 15:30:02.250980 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h" event={"ID":"e471579c-a9d3-458c-9149-99a9f77c416e","Type":"ContainerDied","Data":"7fc1dfa098251ce0d4f47b3e3a81865ae684d08b86f662c032cd4b9e38594dbf"} Nov 24 15:30:03 crc kubenswrapper[5039]: I1124 15:30:03.665521 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h" Nov 24 15:30:03 crc kubenswrapper[5039]: I1124 15:30:03.808433 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e471579c-a9d3-458c-9149-99a9f77c416e-secret-volume\") pod \"e471579c-a9d3-458c-9149-99a9f77c416e\" (UID: \"e471579c-a9d3-458c-9149-99a9f77c416e\") " Nov 24 15:30:03 crc kubenswrapper[5039]: I1124 15:30:03.808879 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e471579c-a9d3-458c-9149-99a9f77c416e-config-volume\") pod \"e471579c-a9d3-458c-9149-99a9f77c416e\" (UID: \"e471579c-a9d3-458c-9149-99a9f77c416e\") " Nov 24 15:30:03 crc kubenswrapper[5039]: I1124 15:30:03.809010 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vff97\" (UniqueName: \"kubernetes.io/projected/e471579c-a9d3-458c-9149-99a9f77c416e-kube-api-access-vff97\") pod \"e471579c-a9d3-458c-9149-99a9f77c416e\" (UID: \"e471579c-a9d3-458c-9149-99a9f77c416e\") " Nov 24 15:30:03 crc kubenswrapper[5039]: I1124 15:30:03.811076 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e471579c-a9d3-458c-9149-99a9f77c416e-config-volume" (OuterVolumeSpecName: "config-volume") pod "e471579c-a9d3-458c-9149-99a9f77c416e" (UID: "e471579c-a9d3-458c-9149-99a9f77c416e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 15:30:03 crc kubenswrapper[5039]: I1124 15:30:03.816610 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e471579c-a9d3-458c-9149-99a9f77c416e-kube-api-access-vff97" (OuterVolumeSpecName: "kube-api-access-vff97") pod "e471579c-a9d3-458c-9149-99a9f77c416e" (UID: "e471579c-a9d3-458c-9149-99a9f77c416e"). InnerVolumeSpecName "kube-api-access-vff97". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 15:30:03 crc kubenswrapper[5039]: I1124 15:30:03.817683 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e471579c-a9d3-458c-9149-99a9f77c416e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e471579c-a9d3-458c-9149-99a9f77c416e" (UID: "e471579c-a9d3-458c-9149-99a9f77c416e"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 15:30:03 crc kubenswrapper[5039]: I1124 15:30:03.912244 5039 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e471579c-a9d3-458c-9149-99a9f77c416e-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 24 15:30:03 crc kubenswrapper[5039]: I1124 15:30:03.912294 5039 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e471579c-a9d3-458c-9149-99a9f77c416e-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 15:30:03 crc kubenswrapper[5039]: I1124 15:30:03.912310 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vff97\" (UniqueName: \"kubernetes.io/projected/e471579c-a9d3-458c-9149-99a9f77c416e-kube-api-access-vff97\") on node \"crc\" DevicePath \"\"" Nov 24 15:30:04 crc kubenswrapper[5039]: I1124 15:30:04.274426 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h" event={"ID":"e471579c-a9d3-458c-9149-99a9f77c416e","Type":"ContainerDied","Data":"1367f4e6f3acf6096d22843db543b728d164861e87e5474a27a98533908276ee"} Nov 24 15:30:04 crc kubenswrapper[5039]: I1124 15:30:04.274469 5039 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1367f4e6f3acf6096d22843db543b728d164861e87e5474a27a98533908276ee" Nov 24 15:30:04 crc kubenswrapper[5039]: I1124 15:30:04.274535 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399970-rwl8h" Nov 24 15:30:04 crc kubenswrapper[5039]: I1124 15:30:04.306919 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:30:04 crc kubenswrapper[5039]: E1124 15:30:04.307401 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:30:04 crc kubenswrapper[5039]: I1124 15:30:04.365833 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6"] Nov 24 15:30:04 crc kubenswrapper[5039]: I1124 15:30:04.376044 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399925-xcwb6"] Nov 24 15:30:06 crc kubenswrapper[5039]: I1124 15:30:06.326530 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="251b2e5c-beac-4081-ba13-c9c21e99d0af" path="/var/lib/kubelet/pods/251b2e5c-beac-4081-ba13-c9c21e99d0af/volumes" Nov 24 15:30:08 crc kubenswrapper[5039]: I1124 15:30:08.669639 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-m58j2_3e29f306-3558-4854-9ada-3ff94d2ad700/kube-rbac-proxy/0.log" Nov 24 15:30:08 crc kubenswrapper[5039]: I1124 15:30:08.726705 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-m58j2_3e29f306-3558-4854-9ada-3ff94d2ad700/controller/0.log" Nov 24 15:30:08 crc kubenswrapper[5039]: I1124 15:30:08.889613 5039 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-frr-files/0.log" Nov 24 15:30:09 crc kubenswrapper[5039]: I1124 15:30:09.074015 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-frr-files/0.log" Nov 24 15:30:09 crc kubenswrapper[5039]: I1124 15:30:09.116327 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-metrics/0.log" Nov 24 15:30:09 crc kubenswrapper[5039]: I1124 15:30:09.122054 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-reloader/0.log" Nov 24 15:30:09 crc kubenswrapper[5039]: I1124 15:30:09.124305 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-reloader/0.log" Nov 24 15:30:09 crc kubenswrapper[5039]: I1124 15:30:09.286857 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-reloader/0.log" Nov 24 15:30:09 crc kubenswrapper[5039]: I1124 15:30:09.287179 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-frr-files/0.log" Nov 24 15:30:09 crc kubenswrapper[5039]: I1124 15:30:09.362018 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-metrics/0.log" Nov 24 15:30:09 crc kubenswrapper[5039]: I1124 15:30:09.382863 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-metrics/0.log" Nov 24 15:30:09 crc kubenswrapper[5039]: I1124 15:30:09.527342 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-frr-files/0.log" Nov 24 15:30:09 crc kubenswrapper[5039]: I1124 15:30:09.537928 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-reloader/0.log" Nov 24 15:30:09 crc kubenswrapper[5039]: I1124 15:30:09.581330 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/controller/0.log" Nov 24 15:30:09 crc kubenswrapper[5039]: I1124 15:30:09.586281 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/cp-metrics/0.log" Nov 24 15:30:09 crc kubenswrapper[5039]: I1124 15:30:09.769889 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/kube-rbac-proxy/0.log" Nov 24 15:30:09 crc kubenswrapper[5039]: I1124 15:30:09.791919 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/frr-metrics/0.log" Nov 24 15:30:09 crc kubenswrapper[5039]: I1124 15:30:09.862789 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/kube-rbac-proxy-frr/0.log" Nov 24 15:30:10 crc kubenswrapper[5039]: I1124 15:30:10.025527 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/reloader/0.log" Nov 24 15:30:10 crc kubenswrapper[5039]: I1124 
15:30:10.124183 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-kn25p_d341f082-ff80-43f0-aa5c-1476f8addb05/frr-k8s-webhook-server/0.log" Nov 24 15:30:10 crc kubenswrapper[5039]: I1124 15:30:10.331281 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-859686c6ff-hskrv_4af724f9-39c3-414e-a020-29da6a5bfac7/manager/0.log" Nov 24 15:30:10 crc kubenswrapper[5039]: I1124 15:30:10.475703 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-76f55458ff-26z8q_18cdfd31-117a-4b07-bdba-fc6703fcfa55/webhook-server/0.log" Nov 24 15:30:10 crc kubenswrapper[5039]: I1124 15:30:10.732460 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-7wr9z_fa0ca8a9-96d3-40dc-916f-97048b7112b0/kube-rbac-proxy/0.log" Nov 24 15:30:11 crc kubenswrapper[5039]: I1124 15:30:11.287105 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-7wr9z_fa0ca8a9-96d3-40dc-916f-97048b7112b0/speaker/0.log" Nov 24 15:30:11 crc kubenswrapper[5039]: I1124 15:30:11.916426 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-2zrlg_a6e7ae26-ae37-48c9-97ca-917a4e92a535/frr/0.log" Nov 24 15:30:17 crc kubenswrapper[5039]: I1124 15:30:17.306680 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:30:17 crc kubenswrapper[5039]: E1124 15:30:17.307531 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:30:25 crc kubenswrapper[5039]: I1124 15:30:25.314538 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz_d615d58f-8a19-4226-a022-26c3c2f46eaa/util/0.log" Nov 24 15:30:25 crc kubenswrapper[5039]: I1124 15:30:25.458997 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz_d615d58f-8a19-4226-a022-26c3c2f46eaa/util/0.log" Nov 24 15:30:25 crc kubenswrapper[5039]: I1124 15:30:25.512799 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz_d615d58f-8a19-4226-a022-26c3c2f46eaa/pull/0.log" Nov 24 15:30:25 crc kubenswrapper[5039]: I1124 15:30:25.516676 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz_d615d58f-8a19-4226-a022-26c3c2f46eaa/pull/0.log" Nov 24 15:30:25 crc kubenswrapper[5039]: I1124 15:30:25.647293 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz_d615d58f-8a19-4226-a022-26c3c2f46eaa/util/0.log" Nov 24 15:30:25 crc kubenswrapper[5039]: I1124 15:30:25.695006 5039 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz_d615d58f-8a19-4226-a022-26c3c2f46eaa/pull/0.log" Nov 24 15:30:25 crc kubenswrapper[5039]: I1124 15:30:25.710074 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8v6dgz_d615d58f-8a19-4226-a022-26c3c2f46eaa/extract/0.log" Nov 24 15:30:25 crc kubenswrapper[5039]: I1124 15:30:25.815597 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb_175053d9-6995-4edc-9e0b-a72a0e10ae72/util/0.log" Nov 24 15:30:26 crc kubenswrapper[5039]: I1124 15:30:26.044012 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb_175053d9-6995-4edc-9e0b-a72a0e10ae72/util/0.log" Nov 24 15:30:26 crc kubenswrapper[5039]: I1124 15:30:26.046931 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb_175053d9-6995-4edc-9e0b-a72a0e10ae72/pull/0.log" Nov 24 15:30:26 crc kubenswrapper[5039]: I1124 15:30:26.097574 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb_175053d9-6995-4edc-9e0b-a72a0e10ae72/pull/0.log" Nov 24 15:30:26 crc kubenswrapper[5039]: I1124 15:30:26.271334 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb_175053d9-6995-4edc-9e0b-a72a0e10ae72/pull/0.log" Nov 24 15:30:26 crc kubenswrapper[5039]: I1124 15:30:26.272057 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb_175053d9-6995-4edc-9e0b-a72a0e10ae72/extract/0.log" Nov 24 15:30:26 crc kubenswrapper[5039]: I1124 15:30:26.284480 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e8jgtb_175053d9-6995-4edc-9e0b-a72a0e10ae72/util/0.log" Nov 24 15:30:26 crc kubenswrapper[5039]: I1124 15:30:26.452184 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b_f60bd1ab-ddc1-462f-85f9-e47d7305727d/util/0.log" Nov 24 15:30:26 crc kubenswrapper[5039]: I1124 15:30:26.607108 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b_f60bd1ab-ddc1-462f-85f9-e47d7305727d/pull/0.log" Nov 24 15:30:26 crc kubenswrapper[5039]: I1124 15:30:26.637437 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b_f60bd1ab-ddc1-462f-85f9-e47d7305727d/util/0.log" Nov 24 15:30:26 crc kubenswrapper[5039]: I1124 15:30:26.657427 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b_f60bd1ab-ddc1-462f-85f9-e47d7305727d/pull/0.log" Nov 24 15:30:27 crc kubenswrapper[5039]: I1124 15:30:27.018047 5039 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b_f60bd1ab-ddc1-462f-85f9-e47d7305727d/util/0.log" Nov 24 15:30:27 crc kubenswrapper[5039]: I1124 15:30:27.022613 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b_f60bd1ab-ddc1-462f-85f9-e47d7305727d/pull/0.log" Nov 24 15:30:27 crc kubenswrapper[5039]: I1124 15:30:27.032460 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nwg9b_f60bd1ab-ddc1-462f-85f9-e47d7305727d/extract/0.log" Nov 24 15:30:27 crc kubenswrapper[5039]: I1124 15:30:27.177371 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6_8153476f-9f52-4a9b-9976-f71664f6f667/util/0.log" Nov 24 15:30:27 crc kubenswrapper[5039]: I1124 15:30:27.371923 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6_8153476f-9f52-4a9b-9976-f71664f6f667/pull/0.log" Nov 24 15:30:27 crc kubenswrapper[5039]: I1124 15:30:27.428777 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6_8153476f-9f52-4a9b-9976-f71664f6f667/pull/0.log" Nov 24 15:30:27 crc kubenswrapper[5039]: I1124 15:30:27.451539 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6_8153476f-9f52-4a9b-9976-f71664f6f667/util/0.log" Nov 24 15:30:27 crc kubenswrapper[5039]: I1124 15:30:27.634786 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6_8153476f-9f52-4a9b-9976-f71664f6f667/extract/0.log" Nov 24 15:30:27 crc kubenswrapper[5039]: I1124 15:30:27.643986 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6_8153476f-9f52-4a9b-9976-f71664f6f667/util/0.log" Nov 24 15:30:27 crc kubenswrapper[5039]: I1124 15:30:27.649295 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fs8wz6_8153476f-9f52-4a9b-9976-f71664f6f667/pull/0.log" Nov 24 15:30:27 crc kubenswrapper[5039]: I1124 15:30:27.811791 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2hsdg_71436ae2-8c4f-46bd-b877-b93ab84dbdac/extract-utilities/0.log" Nov 24 15:30:27 crc kubenswrapper[5039]: I1124 15:30:27.995257 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2hsdg_71436ae2-8c4f-46bd-b877-b93ab84dbdac/extract-utilities/0.log" Nov 24 15:30:28 crc kubenswrapper[5039]: I1124 15:30:28.009773 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2hsdg_71436ae2-8c4f-46bd-b877-b93ab84dbdac/extract-content/0.log" Nov 24 15:30:28 crc kubenswrapper[5039]: I1124 15:30:28.040096 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2hsdg_71436ae2-8c4f-46bd-b877-b93ab84dbdac/extract-content/0.log" Nov 24 15:30:28 crc kubenswrapper[5039]: I1124 15:30:28.192066 5039 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2hsdg_71436ae2-8c4f-46bd-b877-b93ab84dbdac/extract-utilities/0.log" Nov 24 15:30:28 crc kubenswrapper[5039]: I1124 15:30:28.216803 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2hsdg_71436ae2-8c4f-46bd-b877-b93ab84dbdac/extract-content/0.log" Nov 24 15:30:28 crc kubenswrapper[5039]: I1124 15:30:28.372110 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2hsdg_71436ae2-8c4f-46bd-b877-b93ab84dbdac/registry-server/0.log" Nov 24 15:30:28 crc kubenswrapper[5039]: I1124 15:30:28.391032 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dlptg_592272ed-6a8c-42d9-8c87-b62ba335267c/extract-utilities/0.log" Nov 24 15:30:28 crc kubenswrapper[5039]: I1124 15:30:28.621360 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dlptg_592272ed-6a8c-42d9-8c87-b62ba335267c/extract-utilities/0.log" Nov 24 15:30:28 crc kubenswrapper[5039]: I1124 15:30:28.649602 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dlptg_592272ed-6a8c-42d9-8c87-b62ba335267c/extract-content/0.log" Nov 24 15:30:28 crc kubenswrapper[5039]: I1124 15:30:28.667071 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dlptg_592272ed-6a8c-42d9-8c87-b62ba335267c/extract-content/0.log" Nov 24 15:30:28 crc kubenswrapper[5039]: I1124 15:30:28.839055 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dlptg_592272ed-6a8c-42d9-8c87-b62ba335267c/extract-utilities/0.log" Nov 24 15:30:28 crc kubenswrapper[5039]: I1124 15:30:28.884716 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dlptg_592272ed-6a8c-42d9-8c87-b62ba335267c/extract-content/0.log" Nov 24 15:30:28 crc kubenswrapper[5039]: I1124 15:30:28.900069 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk_8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130/util/0.log" Nov 24 15:30:29 crc kubenswrapper[5039]: I1124 15:30:29.172141 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dlptg_592272ed-6a8c-42d9-8c87-b62ba335267c/registry-server/0.log" Nov 24 15:30:29 crc kubenswrapper[5039]: I1124 15:30:29.179375 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk_8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130/util/0.log" Nov 24 15:30:29 crc kubenswrapper[5039]: I1124 15:30:29.200280 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk_8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130/pull/0.log" Nov 24 15:30:29 crc kubenswrapper[5039]: I1124 15:30:29.273815 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk_8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130/pull/0.log" Nov 24 15:30:29 crc kubenswrapper[5039]: I1124 15:30:29.307124 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:30:29 crc kubenswrapper[5039]: E1124 
15:30:29.307387 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:30:29 crc kubenswrapper[5039]: I1124 15:30:29.422971 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk_8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130/util/0.log" Nov 24 15:30:29 crc kubenswrapper[5039]: I1124 15:30:29.461012 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk_8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130/pull/0.log" Nov 24 15:30:29 crc kubenswrapper[5039]: I1124 15:30:29.471772 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tv4pk_8a7b6ca0-68e4-44d2-a9b4-d0c4a9198130/extract/0.log" Nov 24 15:30:29 crc kubenswrapper[5039]: I1124 15:30:29.475420 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-zhpf5_c0bf8d9e-d6fb-400d-8fa2-d547a9a64107/marketplace-operator/0.log" Nov 24 15:30:29 crc kubenswrapper[5039]: I1124 15:30:29.622060 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-c92b4_91b2d0f6-ea36-4860-aa8a-2645a1a44741/extract-utilities/0.log" Nov 24 15:30:29 crc kubenswrapper[5039]: I1124 15:30:29.796924 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-c92b4_91b2d0f6-ea36-4860-aa8a-2645a1a44741/extract-utilities/0.log" Nov 24 15:30:29 crc kubenswrapper[5039]: I1124 15:30:29.820755 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-c92b4_91b2d0f6-ea36-4860-aa8a-2645a1a44741/extract-content/0.log" Nov 24 15:30:29 crc kubenswrapper[5039]: I1124 15:30:29.853085 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-c92b4_91b2d0f6-ea36-4860-aa8a-2645a1a44741/extract-content/0.log" Nov 24 15:30:30 crc kubenswrapper[5039]: I1124 15:30:30.008010 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-c92b4_91b2d0f6-ea36-4860-aa8a-2645a1a44741/extract-content/0.log" Nov 24 15:30:30 crc kubenswrapper[5039]: I1124 15:30:30.034904 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-c92b4_91b2d0f6-ea36-4860-aa8a-2645a1a44741/extract-utilities/0.log" Nov 24 15:30:30 crc kubenswrapper[5039]: I1124 15:30:30.051318 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bkhz2_36fbfba2-65ab-44a5-9e69-0f6c67426f55/extract-utilities/0.log" Nov 24 15:30:30 crc kubenswrapper[5039]: I1124 15:30:30.291387 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bkhz2_36fbfba2-65ab-44a5-9e69-0f6c67426f55/extract-content/0.log" Nov 24 15:30:30 crc kubenswrapper[5039]: I1124 15:30:30.328672 5039 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-c92b4_91b2d0f6-ea36-4860-aa8a-2645a1a44741/registry-server/0.log" Nov 24 15:30:30 crc kubenswrapper[5039]: I1124 15:30:30.330898 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bkhz2_36fbfba2-65ab-44a5-9e69-0f6c67426f55/extract-content/0.log" Nov 24 15:30:30 crc kubenswrapper[5039]: I1124 15:30:30.331975 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bkhz2_36fbfba2-65ab-44a5-9e69-0f6c67426f55/extract-utilities/0.log" Nov 24 15:30:30 crc kubenswrapper[5039]: I1124 15:30:30.515334 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bkhz2_36fbfba2-65ab-44a5-9e69-0f6c67426f55/extract-utilities/0.log" Nov 24 15:30:30 crc kubenswrapper[5039]: I1124 15:30:30.540872 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bkhz2_36fbfba2-65ab-44a5-9e69-0f6c67426f55/extract-content/0.log" Nov 24 15:30:30 crc kubenswrapper[5039]: I1124 15:30:30.999925 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bkhz2_36fbfba2-65ab-44a5-9e69-0f6c67426f55/registry-server/0.log" Nov 24 15:30:42 crc kubenswrapper[5039]: I1124 15:30:42.306465 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:30:42 crc kubenswrapper[5039]: E1124 15:30:42.307305 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:30:45 crc kubenswrapper[5039]: I1124 15:30:45.465685 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-dnvjn_21190c12-076c-4263-a68d-6dc4117e1d10/prometheus-operator/0.log" Nov 24 15:30:45 crc kubenswrapper[5039]: I1124 15:30:45.689160 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-657sq_eaf06631-af5c-4dd1-8b38-e3f94b8c4c9e/prometheus-operator-admission-webhook/0.log" Nov 24 15:30:45 crc kubenswrapper[5039]: I1124 15:30:45.761095 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5c8c7dc9c6-npb6w_79c1d6a0-9ed7-48c8-8a09-e4695a89d953/prometheus-operator-admission-webhook/0.log" Nov 24 15:30:45 crc kubenswrapper[5039]: I1124 15:30:45.908940 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-6t44d_dcdfb73e-765a-4fba-bdcb-0ca1cd215211/operator/0.log" Nov 24 15:30:46 crc kubenswrapper[5039]: I1124 15:30:46.012473 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-7d5fb4cbfb-s2gqw_7d53a76d-409b-45a1-8000-d4f8f2b1ac18/observability-ui-dashboards/0.log" Nov 24 15:30:46 crc kubenswrapper[5039]: I1124 15:30:46.076288 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-kkzn2_f6b4ca0c-993f-4e33-b14a-ff3c6c12ef7b/perses-operator/0.log" Nov 24 15:30:55 crc 
kubenswrapper[5039]: I1124 15:30:55.307939 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:30:55 crc kubenswrapper[5039]: E1124 15:30:55.309649 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:31:00 crc kubenswrapper[5039]: I1124 15:31:00.659935 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-dfbf69d45-vngzb_e97f0fac-4f42-4ea9-b853-33c7aedeba68/kube-rbac-proxy/0.log" Nov 24 15:31:00 crc kubenswrapper[5039]: I1124 15:31:00.762186 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-dfbf69d45-vngzb_e97f0fac-4f42-4ea9-b853-33c7aedeba68/manager/0.log" Nov 24 15:31:02 crc kubenswrapper[5039]: I1124 15:31:02.100539 5039 scope.go:117] "RemoveContainer" containerID="f9c555e37530ef860701043c4e3f5140f92b20ea397d6c322b77dffdd62f4782" Nov 24 15:31:06 crc kubenswrapper[5039]: I1124 15:31:06.307821 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:31:06 crc kubenswrapper[5039]: E1124 15:31:06.308482 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:31:18 crc kubenswrapper[5039]: I1124 15:31:18.316774 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:31:18 crc kubenswrapper[5039]: E1124 15:31:18.317839 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:31:34 crc kubenswrapper[5039]: I1124 15:31:34.307338 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:31:34 crc kubenswrapper[5039]: E1124 15:31:34.308306 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:31:47 crc kubenswrapper[5039]: I1124 15:31:47.319812 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 
15:31:47 crc kubenswrapper[5039]: E1124 15:31:47.321129 5039 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8x5rg_openshift-machine-config-operator(ce86b4cd-2cb0-4cec-8b42-22a855734a60)\"" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" Nov 24 15:32:00 crc kubenswrapper[5039]: I1124 15:32:00.309619 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:32:01 crc kubenswrapper[5039]: I1124 15:32:01.575193 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"576828d499a77a6114c0bec28002d38dc7e33fdabcfafd800345a479b59e69de"} Nov 24 15:32:58 crc kubenswrapper[5039]: I1124 15:32:58.393365 5039 generic.go:334] "Generic (PLEG): container finished" podID="fd30c717-0e76-4e6f-bcc3-e48a24658894" containerID="a6ffa8f2dca749e749f1c08c2a067b9f9760149697cab4558a97327536b75575" exitCode=0 Nov 24 15:32:58 crc kubenswrapper[5039]: I1124 15:32:58.393600 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-k88gt/must-gather-wtzc2" event={"ID":"fd30c717-0e76-4e6f-bcc3-e48a24658894","Type":"ContainerDied","Data":"a6ffa8f2dca749e749f1c08c2a067b9f9760149697cab4558a97327536b75575"} Nov 24 15:32:58 crc kubenswrapper[5039]: I1124 15:32:58.395427 5039 scope.go:117] "RemoveContainer" containerID="a6ffa8f2dca749e749f1c08c2a067b9f9760149697cab4558a97327536b75575" Nov 24 15:32:58 crc kubenswrapper[5039]: I1124 15:32:58.565735 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-k88gt_must-gather-wtzc2_fd30c717-0e76-4e6f-bcc3-e48a24658894/gather/0.log" Nov 24 15:33:02 crc kubenswrapper[5039]: I1124 15:33:02.262180 5039 scope.go:117] "RemoveContainer" containerID="af8704e96204502a5a549f9c92037e1323f761545ec2d472bcacfcc7d1e27394" Nov 24 15:33:10 crc kubenswrapper[5039]: I1124 15:33:10.050261 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-k88gt/must-gather-wtzc2"] Nov 24 15:33:10 crc kubenswrapper[5039]: I1124 15:33:10.051087 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-k88gt/must-gather-wtzc2" podUID="fd30c717-0e76-4e6f-bcc3-e48a24658894" containerName="copy" containerID="cri-o://50e62532f0df608b90adcb17c9f88f289586fb2d7008c7cf4d47d150c5617038" gracePeriod=2 Nov 24 15:33:10 crc kubenswrapper[5039]: I1124 15:33:10.066283 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-k88gt/must-gather-wtzc2"] Nov 24 15:33:10 crc kubenswrapper[5039]: I1124 15:33:10.568459 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-k88gt_must-gather-wtzc2_fd30c717-0e76-4e6f-bcc3-e48a24658894/copy/0.log" Nov 24 15:33:10 crc kubenswrapper[5039]: I1124 15:33:10.569260 5039 generic.go:334] "Generic (PLEG): container finished" podID="fd30c717-0e76-4e6f-bcc3-e48a24658894" containerID="50e62532f0df608b90adcb17c9f88f289586fb2d7008c7cf4d47d150c5617038" exitCode=143 Nov 24 15:33:10 crc kubenswrapper[5039]: I1124 15:33:10.569306 5039 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="acb0f6ae9d8cf6d51eaf592df648237feb76219232ab4053ccd183c2617f1147" Nov 24 15:33:10 crc kubenswrapper[5039]: I1124 15:33:10.647001 5039 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-k88gt_must-gather-wtzc2_fd30c717-0e76-4e6f-bcc3-e48a24658894/copy/0.log" Nov 24 15:33:10 crc kubenswrapper[5039]: I1124 15:33:10.647624 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-k88gt/must-gather-wtzc2" Nov 24 15:33:10 crc kubenswrapper[5039]: I1124 15:33:10.798284 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cq46j\" (UniqueName: \"kubernetes.io/projected/fd30c717-0e76-4e6f-bcc3-e48a24658894-kube-api-access-cq46j\") pod \"fd30c717-0e76-4e6f-bcc3-e48a24658894\" (UID: \"fd30c717-0e76-4e6f-bcc3-e48a24658894\") " Nov 24 15:33:10 crc kubenswrapper[5039]: I1124 15:33:10.798650 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/fd30c717-0e76-4e6f-bcc3-e48a24658894-must-gather-output\") pod \"fd30c717-0e76-4e6f-bcc3-e48a24658894\" (UID: \"fd30c717-0e76-4e6f-bcc3-e48a24658894\") " Nov 24 15:33:10 crc kubenswrapper[5039]: I1124 15:33:10.807978 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd30c717-0e76-4e6f-bcc3-e48a24658894-kube-api-access-cq46j" (OuterVolumeSpecName: "kube-api-access-cq46j") pod "fd30c717-0e76-4e6f-bcc3-e48a24658894" (UID: "fd30c717-0e76-4e6f-bcc3-e48a24658894"). InnerVolumeSpecName "kube-api-access-cq46j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 15:33:10 crc kubenswrapper[5039]: I1124 15:33:10.903231 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cq46j\" (UniqueName: \"kubernetes.io/projected/fd30c717-0e76-4e6f-bcc3-e48a24658894-kube-api-access-cq46j\") on node \"crc\" DevicePath \"\"" Nov 24 15:33:11 crc kubenswrapper[5039]: I1124 15:33:11.011770 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd30c717-0e76-4e6f-bcc3-e48a24658894-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "fd30c717-0e76-4e6f-bcc3-e48a24658894" (UID: "fd30c717-0e76-4e6f-bcc3-e48a24658894"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 15:33:11 crc kubenswrapper[5039]: I1124 15:33:11.107794 5039 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/fd30c717-0e76-4e6f-bcc3-e48a24658894-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 24 15:33:11 crc kubenswrapper[5039]: I1124 15:33:11.577101 5039 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-k88gt/must-gather-wtzc2" Nov 24 15:33:12 crc kubenswrapper[5039]: I1124 15:33:12.327771 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd30c717-0e76-4e6f-bcc3-e48a24658894" path="/var/lib/kubelet/pods/fd30c717-0e76-4e6f-bcc3-e48a24658894/volumes" Nov 24 15:33:58 crc kubenswrapper[5039]: I1124 15:33:58.743187 5039 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-cwlzw"] Nov 24 15:33:58 crc kubenswrapper[5039]: E1124 15:33:58.744195 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd30c717-0e76-4e6f-bcc3-e48a24658894" containerName="gather" Nov 24 15:33:58 crc kubenswrapper[5039]: I1124 15:33:58.744210 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd30c717-0e76-4e6f-bcc3-e48a24658894" containerName="gather" Nov 24 15:33:58 crc kubenswrapper[5039]: E1124 15:33:58.744267 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd30c717-0e76-4e6f-bcc3-e48a24658894" containerName="copy" Nov 24 15:33:58 crc kubenswrapper[5039]: I1124 15:33:58.744276 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd30c717-0e76-4e6f-bcc3-e48a24658894" containerName="copy" Nov 24 15:33:58 crc kubenswrapper[5039]: E1124 15:33:58.744293 5039 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e471579c-a9d3-458c-9149-99a9f77c416e" containerName="collect-profiles" Nov 24 15:33:58 crc kubenswrapper[5039]: I1124 15:33:58.744302 5039 state_mem.go:107] "Deleted CPUSet assignment" podUID="e471579c-a9d3-458c-9149-99a9f77c416e" containerName="collect-profiles" Nov 24 15:33:58 crc kubenswrapper[5039]: I1124 15:33:58.744566 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd30c717-0e76-4e6f-bcc3-e48a24658894" containerName="copy" Nov 24 15:33:58 crc kubenswrapper[5039]: I1124 15:33:58.744599 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="e471579c-a9d3-458c-9149-99a9f77c416e" containerName="collect-profiles" Nov 24 15:33:58 crc kubenswrapper[5039]: I1124 15:33:58.744613 5039 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd30c717-0e76-4e6f-bcc3-e48a24658894" containerName="gather" Nov 24 15:33:58 crc kubenswrapper[5039]: I1124 15:33:58.746662 5039 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cwlzw" Nov 24 15:33:58 crc kubenswrapper[5039]: I1124 15:33:58.762131 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cwlzw"] Nov 24 15:33:58 crc kubenswrapper[5039]: I1124 15:33:58.876117 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ldtbf\" (UniqueName: \"kubernetes.io/projected/afd97b25-8cf2-41ec-b453-ef3d61a5960f-kube-api-access-ldtbf\") pod \"community-operators-cwlzw\" (UID: \"afd97b25-8cf2-41ec-b453-ef3d61a5960f\") " pod="openshift-marketplace/community-operators-cwlzw" Nov 24 15:33:58 crc kubenswrapper[5039]: I1124 15:33:58.876184 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/afd97b25-8cf2-41ec-b453-ef3d61a5960f-catalog-content\") pod \"community-operators-cwlzw\" (UID: \"afd97b25-8cf2-41ec-b453-ef3d61a5960f\") " pod="openshift-marketplace/community-operators-cwlzw" Nov 24 15:33:58 crc kubenswrapper[5039]: I1124 15:33:58.876234 5039 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/afd97b25-8cf2-41ec-b453-ef3d61a5960f-utilities\") pod \"community-operators-cwlzw\" (UID: \"afd97b25-8cf2-41ec-b453-ef3d61a5960f\") " pod="openshift-marketplace/community-operators-cwlzw" Nov 24 15:33:58 crc kubenswrapper[5039]: I1124 15:33:58.977873 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ldtbf\" (UniqueName: \"kubernetes.io/projected/afd97b25-8cf2-41ec-b453-ef3d61a5960f-kube-api-access-ldtbf\") pod \"community-operators-cwlzw\" (UID: \"afd97b25-8cf2-41ec-b453-ef3d61a5960f\") " pod="openshift-marketplace/community-operators-cwlzw" Nov 24 15:33:58 crc kubenswrapper[5039]: I1124 15:33:58.977938 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/afd97b25-8cf2-41ec-b453-ef3d61a5960f-catalog-content\") pod \"community-operators-cwlzw\" (UID: \"afd97b25-8cf2-41ec-b453-ef3d61a5960f\") " pod="openshift-marketplace/community-operators-cwlzw" Nov 24 15:33:58 crc kubenswrapper[5039]: I1124 15:33:58.977984 5039 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/afd97b25-8cf2-41ec-b453-ef3d61a5960f-utilities\") pod \"community-operators-cwlzw\" (UID: \"afd97b25-8cf2-41ec-b453-ef3d61a5960f\") " pod="openshift-marketplace/community-operators-cwlzw" Nov 24 15:33:58 crc kubenswrapper[5039]: I1124 15:33:58.978462 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/afd97b25-8cf2-41ec-b453-ef3d61a5960f-catalog-content\") pod \"community-operators-cwlzw\" (UID: \"afd97b25-8cf2-41ec-b453-ef3d61a5960f\") " pod="openshift-marketplace/community-operators-cwlzw" Nov 24 15:33:58 crc kubenswrapper[5039]: I1124 15:33:58.978539 5039 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/afd97b25-8cf2-41ec-b453-ef3d61a5960f-utilities\") pod \"community-operators-cwlzw\" (UID: \"afd97b25-8cf2-41ec-b453-ef3d61a5960f\") " pod="openshift-marketplace/community-operators-cwlzw" Nov 24 15:33:59 crc kubenswrapper[5039]: I1124 15:33:59.018431 5039 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-ldtbf\" (UniqueName: \"kubernetes.io/projected/afd97b25-8cf2-41ec-b453-ef3d61a5960f-kube-api-access-ldtbf\") pod \"community-operators-cwlzw\" (UID: \"afd97b25-8cf2-41ec-b453-ef3d61a5960f\") " pod="openshift-marketplace/community-operators-cwlzw" Nov 24 15:33:59 crc kubenswrapper[5039]: I1124 15:33:59.073029 5039 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cwlzw" Nov 24 15:33:59 crc kubenswrapper[5039]: I1124 15:33:59.594760 5039 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cwlzw"] Nov 24 15:34:00 crc kubenswrapper[5039]: I1124 15:34:00.225366 5039 generic.go:334] "Generic (PLEG): container finished" podID="afd97b25-8cf2-41ec-b453-ef3d61a5960f" containerID="304e61c5b0e3b7f1b018f578a91d6d78eeea55f06b2510520b4cfce4945b881e" exitCode=0 Nov 24 15:34:00 crc kubenswrapper[5039]: I1124 15:34:00.225483 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cwlzw" event={"ID":"afd97b25-8cf2-41ec-b453-ef3d61a5960f","Type":"ContainerDied","Data":"304e61c5b0e3b7f1b018f578a91d6d78eeea55f06b2510520b4cfce4945b881e"} Nov 24 15:34:00 crc kubenswrapper[5039]: I1124 15:34:00.226660 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cwlzw" event={"ID":"afd97b25-8cf2-41ec-b453-ef3d61a5960f","Type":"ContainerStarted","Data":"ccfa8f4483928562b3de2a410a2b59897be3f0ece65522c302a2f2b70bbb57fe"} Nov 24 15:34:00 crc kubenswrapper[5039]: I1124 15:34:00.228683 5039 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 15:34:02 crc kubenswrapper[5039]: I1124 15:34:02.252192 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cwlzw" event={"ID":"afd97b25-8cf2-41ec-b453-ef3d61a5960f","Type":"ContainerStarted","Data":"ad4d897efc06b173a859bd582219f0544a8a7fe32874a16506616ff39c48e47a"} Nov 24 15:34:02 crc kubenswrapper[5039]: I1124 15:34:02.380847 5039 scope.go:117] "RemoveContainer" containerID="a6ffa8f2dca749e749f1c08c2a067b9f9760149697cab4558a97327536b75575" Nov 24 15:34:02 crc kubenswrapper[5039]: I1124 15:34:02.430696 5039 scope.go:117] "RemoveContainer" containerID="50e62532f0df608b90adcb17c9f88f289586fb2d7008c7cf4d47d150c5617038" Nov 24 15:34:03 crc kubenswrapper[5039]: I1124 15:34:03.275893 5039 generic.go:334] "Generic (PLEG): container finished" podID="afd97b25-8cf2-41ec-b453-ef3d61a5960f" containerID="ad4d897efc06b173a859bd582219f0544a8a7fe32874a16506616ff39c48e47a" exitCode=0 Nov 24 15:34:03 crc kubenswrapper[5039]: I1124 15:34:03.275960 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cwlzw" event={"ID":"afd97b25-8cf2-41ec-b453-ef3d61a5960f","Type":"ContainerDied","Data":"ad4d897efc06b173a859bd582219f0544a8a7fe32874a16506616ff39c48e47a"} Nov 24 15:34:04 crc kubenswrapper[5039]: I1124 15:34:04.295648 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cwlzw" event={"ID":"afd97b25-8cf2-41ec-b453-ef3d61a5960f","Type":"ContainerStarted","Data":"6fff4d6fd783ff736de08776e0df139f4f5d90824c9ec87a9029934976bad831"} Nov 24 15:34:04 crc kubenswrapper[5039]: I1124 15:34:04.323701 5039 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-cwlzw" podStartSLOduration=2.637191668 
podStartE2EDuration="6.323682596s" podCreationTimestamp="2025-11-24 15:33:58 +0000 UTC" firstStartedPulling="2025-11-24 15:34:00.228248785 +0000 UTC m=+8152.667373295" lastFinishedPulling="2025-11-24 15:34:03.914739723 +0000 UTC m=+8156.353864223" observedRunningTime="2025-11-24 15:34:04.315535927 +0000 UTC m=+8156.754660427" watchObservedRunningTime="2025-11-24 15:34:04.323682596 +0000 UTC m=+8156.762807096" Nov 24 15:34:09 crc kubenswrapper[5039]: I1124 15:34:09.073598 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-cwlzw" Nov 24 15:34:09 crc kubenswrapper[5039]: I1124 15:34:09.074400 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-cwlzw" Nov 24 15:34:09 crc kubenswrapper[5039]: I1124 15:34:09.160207 5039 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-cwlzw" Nov 24 15:34:09 crc kubenswrapper[5039]: I1124 15:34:09.483934 5039 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-cwlzw" Nov 24 15:34:09 crc kubenswrapper[5039]: I1124 15:34:09.558921 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cwlzw"] Nov 24 15:34:11 crc kubenswrapper[5039]: I1124 15:34:11.402000 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-cwlzw" podUID="afd97b25-8cf2-41ec-b453-ef3d61a5960f" containerName="registry-server" containerID="cri-o://6fff4d6fd783ff736de08776e0df139f4f5d90824c9ec87a9029934976bad831" gracePeriod=2 Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.036573 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cwlzw" Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.183015 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/afd97b25-8cf2-41ec-b453-ef3d61a5960f-catalog-content\") pod \"afd97b25-8cf2-41ec-b453-ef3d61a5960f\" (UID: \"afd97b25-8cf2-41ec-b453-ef3d61a5960f\") " Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.183273 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/afd97b25-8cf2-41ec-b453-ef3d61a5960f-utilities\") pod \"afd97b25-8cf2-41ec-b453-ef3d61a5960f\" (UID: \"afd97b25-8cf2-41ec-b453-ef3d61a5960f\") " Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.184265 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/afd97b25-8cf2-41ec-b453-ef3d61a5960f-utilities" (OuterVolumeSpecName: "utilities") pod "afd97b25-8cf2-41ec-b453-ef3d61a5960f" (UID: "afd97b25-8cf2-41ec-b453-ef3d61a5960f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.184465 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ldtbf\" (UniqueName: \"kubernetes.io/projected/afd97b25-8cf2-41ec-b453-ef3d61a5960f-kube-api-access-ldtbf\") pod \"afd97b25-8cf2-41ec-b453-ef3d61a5960f\" (UID: \"afd97b25-8cf2-41ec-b453-ef3d61a5960f\") " Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.185817 5039 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/afd97b25-8cf2-41ec-b453-ef3d61a5960f-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.192617 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/afd97b25-8cf2-41ec-b453-ef3d61a5960f-kube-api-access-ldtbf" (OuterVolumeSpecName: "kube-api-access-ldtbf") pod "afd97b25-8cf2-41ec-b453-ef3d61a5960f" (UID: "afd97b25-8cf2-41ec-b453-ef3d61a5960f"). InnerVolumeSpecName "kube-api-access-ldtbf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.286909 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/afd97b25-8cf2-41ec-b453-ef3d61a5960f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "afd97b25-8cf2-41ec-b453-ef3d61a5960f" (UID: "afd97b25-8cf2-41ec-b453-ef3d61a5960f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.287581 5039 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/afd97b25-8cf2-41ec-b453-ef3d61a5960f-catalog-content\") pod \"afd97b25-8cf2-41ec-b453-ef3d61a5960f\" (UID: \"afd97b25-8cf2-41ec-b453-ef3d61a5960f\") " Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.288418 5039 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ldtbf\" (UniqueName: \"kubernetes.io/projected/afd97b25-8cf2-41ec-b453-ef3d61a5960f-kube-api-access-ldtbf\") on node \"crc\" DevicePath \"\"" Nov 24 15:34:12 crc kubenswrapper[5039]: W1124 15:34:12.289991 5039 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/afd97b25-8cf2-41ec-b453-ef3d61a5960f/volumes/kubernetes.io~empty-dir/catalog-content Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.290531 5039 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/afd97b25-8cf2-41ec-b453-ef3d61a5960f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "afd97b25-8cf2-41ec-b453-ef3d61a5960f" (UID: "afd97b25-8cf2-41ec-b453-ef3d61a5960f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.391195 5039 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/afd97b25-8cf2-41ec-b453-ef3d61a5960f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.422261 5039 generic.go:334] "Generic (PLEG): container finished" podID="afd97b25-8cf2-41ec-b453-ef3d61a5960f" containerID="6fff4d6fd783ff736de08776e0df139f4f5d90824c9ec87a9029934976bad831" exitCode=0 Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.422312 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cwlzw" event={"ID":"afd97b25-8cf2-41ec-b453-ef3d61a5960f","Type":"ContainerDied","Data":"6fff4d6fd783ff736de08776e0df139f4f5d90824c9ec87a9029934976bad831"} Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.422349 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cwlzw" event={"ID":"afd97b25-8cf2-41ec-b453-ef3d61a5960f","Type":"ContainerDied","Data":"ccfa8f4483928562b3de2a410a2b59897be3f0ece65522c302a2f2b70bbb57fe"} Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.422372 5039 scope.go:117] "RemoveContainer" containerID="6fff4d6fd783ff736de08776e0df139f4f5d90824c9ec87a9029934976bad831" Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.422607 5039 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cwlzw" Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.462234 5039 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cwlzw"] Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.475088 5039 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-cwlzw"] Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.487918 5039 scope.go:117] "RemoveContainer" containerID="ad4d897efc06b173a859bd582219f0544a8a7fe32874a16506616ff39c48e47a" Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.519427 5039 scope.go:117] "RemoveContainer" containerID="304e61c5b0e3b7f1b018f578a91d6d78eeea55f06b2510520b4cfce4945b881e" Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.583818 5039 scope.go:117] "RemoveContainer" containerID="6fff4d6fd783ff736de08776e0df139f4f5d90824c9ec87a9029934976bad831" Nov 24 15:34:12 crc kubenswrapper[5039]: E1124 15:34:12.584403 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6fff4d6fd783ff736de08776e0df139f4f5d90824c9ec87a9029934976bad831\": container with ID starting with 6fff4d6fd783ff736de08776e0df139f4f5d90824c9ec87a9029934976bad831 not found: ID does not exist" containerID="6fff4d6fd783ff736de08776e0df139f4f5d90824c9ec87a9029934976bad831" Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.584469 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fff4d6fd783ff736de08776e0df139f4f5d90824c9ec87a9029934976bad831"} err="failed to get container status \"6fff4d6fd783ff736de08776e0df139f4f5d90824c9ec87a9029934976bad831\": rpc error: code = NotFound desc = could not find container \"6fff4d6fd783ff736de08776e0df139f4f5d90824c9ec87a9029934976bad831\": container with ID starting with 6fff4d6fd783ff736de08776e0df139f4f5d90824c9ec87a9029934976bad831 not found: ID does not exist" Nov 24 
15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.584786 5039 scope.go:117] "RemoveContainer" containerID="ad4d897efc06b173a859bd582219f0544a8a7fe32874a16506616ff39c48e47a" Nov 24 15:34:12 crc kubenswrapper[5039]: E1124 15:34:12.585445 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad4d897efc06b173a859bd582219f0544a8a7fe32874a16506616ff39c48e47a\": container with ID starting with ad4d897efc06b173a859bd582219f0544a8a7fe32874a16506616ff39c48e47a not found: ID does not exist" containerID="ad4d897efc06b173a859bd582219f0544a8a7fe32874a16506616ff39c48e47a" Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.585494 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad4d897efc06b173a859bd582219f0544a8a7fe32874a16506616ff39c48e47a"} err="failed to get container status \"ad4d897efc06b173a859bd582219f0544a8a7fe32874a16506616ff39c48e47a\": rpc error: code = NotFound desc = could not find container \"ad4d897efc06b173a859bd582219f0544a8a7fe32874a16506616ff39c48e47a\": container with ID starting with ad4d897efc06b173a859bd582219f0544a8a7fe32874a16506616ff39c48e47a not found: ID does not exist" Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.585544 5039 scope.go:117] "RemoveContainer" containerID="304e61c5b0e3b7f1b018f578a91d6d78eeea55f06b2510520b4cfce4945b881e" Nov 24 15:34:12 crc kubenswrapper[5039]: E1124 15:34:12.586149 5039 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"304e61c5b0e3b7f1b018f578a91d6d78eeea55f06b2510520b4cfce4945b881e\": container with ID starting with 304e61c5b0e3b7f1b018f578a91d6d78eeea55f06b2510520b4cfce4945b881e not found: ID does not exist" containerID="304e61c5b0e3b7f1b018f578a91d6d78eeea55f06b2510520b4cfce4945b881e" Nov 24 15:34:12 crc kubenswrapper[5039]: I1124 15:34:12.586206 5039 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"304e61c5b0e3b7f1b018f578a91d6d78eeea55f06b2510520b4cfce4945b881e"} err="failed to get container status \"304e61c5b0e3b7f1b018f578a91d6d78eeea55f06b2510520b4cfce4945b881e\": rpc error: code = NotFound desc = could not find container \"304e61c5b0e3b7f1b018f578a91d6d78eeea55f06b2510520b4cfce4945b881e\": container with ID starting with 304e61c5b0e3b7f1b018f578a91d6d78eeea55f06b2510520b4cfce4945b881e not found: ID does not exist" Nov 24 15:34:14 crc kubenswrapper[5039]: I1124 15:34:14.329149 5039 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="afd97b25-8cf2-41ec-b453-ef3d61a5960f" path="/var/lib/kubelet/pods/afd97b25-8cf2-41ec-b453-ef3d61a5960f/volumes" Nov 24 15:34:20 crc kubenswrapper[5039]: I1124 15:34:20.102184 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 15:34:20 crc kubenswrapper[5039]: I1124 15:34:20.102745 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 15:34:50 crc kubenswrapper[5039]: I1124 15:34:50.101139 5039 patch_prober.go:28] 
interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 15:34:50 crc kubenswrapper[5039]: I1124 15:34:50.101874 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 15:35:20 crc kubenswrapper[5039]: I1124 15:35:20.101785 5039 patch_prober.go:28] interesting pod/machine-config-daemon-8x5rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 15:35:20 crc kubenswrapper[5039]: I1124 15:35:20.102562 5039 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 15:35:20 crc kubenswrapper[5039]: I1124 15:35:20.102642 5039 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" Nov 24 15:35:20 crc kubenswrapper[5039]: I1124 15:35:20.104234 5039 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"576828d499a77a6114c0bec28002d38dc7e33fdabcfafd800345a479b59e69de"} pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 15:35:20 crc kubenswrapper[5039]: I1124 15:35:20.104360 5039 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" podUID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerName="machine-config-daemon" containerID="cri-o://576828d499a77a6114c0bec28002d38dc7e33fdabcfafd800345a479b59e69de" gracePeriod=600 Nov 24 15:35:20 crc kubenswrapper[5039]: I1124 15:35:20.378830 5039 generic.go:334] "Generic (PLEG): container finished" podID="ce86b4cd-2cb0-4cec-8b42-22a855734a60" containerID="576828d499a77a6114c0bec28002d38dc7e33fdabcfafd800345a479b59e69de" exitCode=0 Nov 24 15:35:20 crc kubenswrapper[5039]: I1124 15:35:20.378894 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerDied","Data":"576828d499a77a6114c0bec28002d38dc7e33fdabcfafd800345a479b59e69de"} Nov 24 15:35:20 crc kubenswrapper[5039]: I1124 15:35:20.379070 5039 scope.go:117] "RemoveContainer" containerID="5349f24b75b3b5b0c0995b6eb2ad746e623d26acbed0b6b20537e449d3fe12d7" Nov 24 15:35:21 crc kubenswrapper[5039]: I1124 15:35:21.393308 5039 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8x5rg" event={"ID":"ce86b4cd-2cb0-4cec-8b42-22a855734a60","Type":"ContainerStarted","Data":"9277acfa2364639271968012c3fcdb99de747c0b53eefa8a88e4bdbcdba7695b"} 